Subversion Repositories Kolibri OS


--- Rev 5097
+++ Rev 5354
 /*
  * Copyright © 2006-2007 Intel Corporation
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
  * to deal in the Software without restriction, including without limitation
  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  * and/or sell copies of the Software, and to permit persons to whom the
  * Software is furnished to do so, subject to the following conditions:
  *
  * The above copyright notice and this permission notice (including the next
  * paragraph) shall be included in all copies or substantial portions of the
  * Software.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  * DEALINGS IN THE SOFTWARE.
  *
  * Authors:
  *  Eric Anholt 
  */
 
 #include 
 #include 
 //#include 
 #include 
 #include 
 #include 
 #include 
 #include 
 #include 
 #include "intel_drv.h"
 #include 
 #include "i915_drv.h"
 #include "i915_trace.h"
 #include 
 #include 
 #include 
 #include 
 #include 
-
-static inline void ndelay(unsigned long x)
-{
-    udelay(DIV_ROUND_UP(x, 1000));
-}
 
 /* Primary plane formats supported by all gen */
 #define COMMON_PRIMARY_FORMATS \
 	DRM_FORMAT_C8, \
 	DRM_FORMAT_RGB565, \
 	DRM_FORMAT_XRGB8888, \
 	DRM_FORMAT_ARGB8888
 
 /* Primary plane formats for gen <= 3 */
 static const uint32_t intel_primary_formats_gen2[] = {
 	COMMON_PRIMARY_FORMATS,
 	DRM_FORMAT_XRGB1555,
 	DRM_FORMAT_ARGB1555,
 };
 
 /* Primary plane formats for gen >= 4 */
 static const uint32_t intel_primary_formats_gen4[] = {
 	COMMON_PRIMARY_FORMATS, \
 	DRM_FORMAT_XBGR8888,
 	DRM_FORMAT_ABGR8888,
 	DRM_FORMAT_XRGB2101010,
 	DRM_FORMAT_ARGB2101010,
 	DRM_FORMAT_XBGR2101010,
 	DRM_FORMAT_ABGR2101010,
 };
 
 /* Cursor formats */
 static const uint32_t intel_cursor_formats[] = {
 	DRM_FORMAT_ARGB8888,
 };
 
-#define DIV_ROUND_CLOSEST_ULL(ll, d)	\
-({ unsigned long long _tmp = (ll)+(d)/2; do_div(_tmp, d); _tmp; })
-
-#define MAX_ERRNO       4095
-phys_addr_t get_bus_addr(void);
-
-static inline void outb(u8 v, u16 port)
-{
-    asm volatile("outb %0,%1" : : "a" (v), "dN" (port));
-}
-static inline u8 inb(u16 port)
-{
-    u8 v;
-    asm volatile("inb %1,%0" : "=a" (v) : "dN" (port));
-    return v;
-}
-
-static void intel_increase_pllclock(struct drm_device *dev,
-				    enum pipe pipe);
 void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
 
 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
 				struct intel_crtc_config *pipe_config);
 static void ironlake_pch_clock_get(struct intel_crtc *crtc,
 				    struct intel_crtc_config *pipe_config);
 
 static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
 			  int x, int y, struct drm_framebuffer *old_fb);
 static int intel_framebuffer_init(struct drm_device *dev,
 				  struct intel_framebuffer *ifb,
 				  struct drm_mode_fb_cmd2 *mode_cmd,
 				  struct drm_i915_gem_object *obj);
-static void intel_dp_set_m_n(struct intel_crtc *crtc);
 static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
 static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
 static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
-					 struct intel_link_m_n *m_n);
+					 struct intel_link_m_n *m_n,
+					 struct intel_link_m_n *m2_n2);
 static void ironlake_set_pipeconf(struct drm_crtc *crtc);
 static void haswell_set_pipeconf(struct drm_crtc *crtc);
 static void intel_set_pipe_csc(struct drm_crtc *crtc);
-static void vlv_prepare_pll(struct intel_crtc *crtc);
+static void vlv_prepare_pll(struct intel_crtc *crtc,
+			    const struct intel_crtc_config *pipe_config);
+static void chv_prepare_pll(struct intel_crtc *crtc,
+			    const struct intel_crtc_config *pipe_config);
 
 static struct intel_encoder *intel_find_encoder(struct intel_connector *connector, int pipe)
 {
 	if (!connector->mst_port)
 		return connector->encoder;
 	else
 		return &connector->mst_port->mst_encoders[pipe]->base;
 }
 
 typedef struct {
     int min, max;
 } intel_range_t;
 
 typedef struct {
     int dot_limit;
     int p2_slow, p2_fast;
 } intel_p2_t;
 
 typedef struct intel_limit intel_limit_t;
 struct intel_limit {
     intel_range_t   dot, vco, n, m, m1, m2, p, p1;
     intel_p2_t      p2;
 };
 
 int
 intel_pch_rawclk(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	WARN_ON(!HAS_PCH_SPLIT(dev));
 
 	return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK;
 }
 
 static inline u32 /* units of 100MHz */
 intel_fdi_link_freq(struct drm_device *dev)
 {
 	if (IS_GEN5(dev)) {
 		struct drm_i915_private *dev_priv = dev->dev_private;
 		return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
 	} else
 		return 27;
 }
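
intel_fdi_link_freq() reports the FDI link frequency in units of 100 MHz, per the comment on its return type, so the hard-coded fallback of 27 stands for a 2.7 GHz link. A minimal standalone sketch of that scaling (the numbers are just the arithmetic implied by the comment, not taken from elsewhere in the file):

/* Illustrative only: "units of 100MHz" means a return value of 27
 * corresponds to a 2.7 GHz (2,700,000 kHz) FDI link. */
#include <stdio.h>

int main(void)
{
	unsigned int fdi_units = 27;                     /* non-GEN5 fallback above */
	unsigned long khz = fdi_units * 100UL * 1000UL;  /* 2,700,000 kHz */

	printf("FDI link: %lu kHz\n", khz);
	return 0;
}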
165
 
144
 
166
static const intel_limit_t intel_limits_i8xx_dac = {
145
static const intel_limit_t intel_limits_i8xx_dac = {
167
	.dot = { .min = 25000, .max = 350000 },
146
	.dot = { .min = 25000, .max = 350000 },
168
	.vco = { .min = 908000, .max = 1512000 },
147
	.vco = { .min = 908000, .max = 1512000 },
169
	.n = { .min = 2, .max = 16 },
148
	.n = { .min = 2, .max = 16 },
170
	.m = { .min = 96, .max = 140 },
149
	.m = { .min = 96, .max = 140 },
171
	.m1 = { .min = 18, .max = 26 },
150
	.m1 = { .min = 18, .max = 26 },
172
	.m2 = { .min = 6, .max = 16 },
151
	.m2 = { .min = 6, .max = 16 },
173
	.p = { .min = 4, .max = 128 },
152
	.p = { .min = 4, .max = 128 },
174
	.p1 = { .min = 2, .max = 33 },
153
	.p1 = { .min = 2, .max = 33 },
175
	.p2 = { .dot_limit = 165000,
154
	.p2 = { .dot_limit = 165000,
176
		.p2_slow = 4, .p2_fast = 2 },
155
		.p2_slow = 4, .p2_fast = 2 },
177
};
156
};
178
 
157
 
179
static const intel_limit_t intel_limits_i8xx_dvo = {
158
static const intel_limit_t intel_limits_i8xx_dvo = {
180
        .dot = { .min = 25000, .max = 350000 },
159
        .dot = { .min = 25000, .max = 350000 },
181
	.vco = { .min = 908000, .max = 1512000 },
160
	.vco = { .min = 908000, .max = 1512000 },
182
	.n = { .min = 2, .max = 16 },
161
	.n = { .min = 2, .max = 16 },
183
        .m = { .min = 96, .max = 140 },
162
        .m = { .min = 96, .max = 140 },
184
        .m1 = { .min = 18, .max = 26 },
163
        .m1 = { .min = 18, .max = 26 },
185
        .m2 = { .min = 6, .max = 16 },
164
        .m2 = { .min = 6, .max = 16 },
186
        .p = { .min = 4, .max = 128 },
165
        .p = { .min = 4, .max = 128 },
187
        .p1 = { .min = 2, .max = 33 },
166
        .p1 = { .min = 2, .max = 33 },
188
	.p2 = { .dot_limit = 165000,
167
	.p2 = { .dot_limit = 165000,
189
		.p2_slow = 4, .p2_fast = 4 },
168
		.p2_slow = 4, .p2_fast = 4 },
190
};
169
};
191
 
170
 
192
static const intel_limit_t intel_limits_i8xx_lvds = {
171
static const intel_limit_t intel_limits_i8xx_lvds = {
193
        .dot = { .min = 25000, .max = 350000 },
172
        .dot = { .min = 25000, .max = 350000 },
194
	.vco = { .min = 908000, .max = 1512000 },
173
	.vco = { .min = 908000, .max = 1512000 },
195
	.n = { .min = 2, .max = 16 },
174
	.n = { .min = 2, .max = 16 },
196
        .m = { .min = 96, .max = 140 },
175
        .m = { .min = 96, .max = 140 },
197
        .m1 = { .min = 18, .max = 26 },
176
        .m1 = { .min = 18, .max = 26 },
198
        .m2 = { .min = 6, .max = 16 },
177
        .m2 = { .min = 6, .max = 16 },
199
        .p = { .min = 4, .max = 128 },
178
        .p = { .min = 4, .max = 128 },
200
        .p1 = { .min = 1, .max = 6 },
179
        .p1 = { .min = 1, .max = 6 },
201
	.p2 = { .dot_limit = 165000,
180
	.p2 = { .dot_limit = 165000,
202
		.p2_slow = 14, .p2_fast = 7 },
181
		.p2_slow = 14, .p2_fast = 7 },
203
};
182
};
204
 
183
 
205
static const intel_limit_t intel_limits_i9xx_sdvo = {
184
static const intel_limit_t intel_limits_i9xx_sdvo = {
206
        .dot = { .min = 20000, .max = 400000 },
185
        .dot = { .min = 20000, .max = 400000 },
207
        .vco = { .min = 1400000, .max = 2800000 },
186
        .vco = { .min = 1400000, .max = 2800000 },
208
        .n = { .min = 1, .max = 6 },
187
        .n = { .min = 1, .max = 6 },
209
        .m = { .min = 70, .max = 120 },
188
        .m = { .min = 70, .max = 120 },
210
	.m1 = { .min = 8, .max = 18 },
189
	.m1 = { .min = 8, .max = 18 },
211
	.m2 = { .min = 3, .max = 7 },
190
	.m2 = { .min = 3, .max = 7 },
212
        .p = { .min = 5, .max = 80 },
191
        .p = { .min = 5, .max = 80 },
213
        .p1 = { .min = 1, .max = 8 },
192
        .p1 = { .min = 1, .max = 8 },
214
	.p2 = { .dot_limit = 200000,
193
	.p2 = { .dot_limit = 200000,
215
		.p2_slow = 10, .p2_fast = 5 },
194
		.p2_slow = 10, .p2_fast = 5 },
216
};
195
};
217
 
196
 
218
static const intel_limit_t intel_limits_i9xx_lvds = {
197
static const intel_limit_t intel_limits_i9xx_lvds = {
219
        .dot = { .min = 20000, .max = 400000 },
198
        .dot = { .min = 20000, .max = 400000 },
220
        .vco = { .min = 1400000, .max = 2800000 },
199
        .vco = { .min = 1400000, .max = 2800000 },
221
        .n = { .min = 1, .max = 6 },
200
        .n = { .min = 1, .max = 6 },
222
        .m = { .min = 70, .max = 120 },
201
        .m = { .min = 70, .max = 120 },
223
	.m1 = { .min = 8, .max = 18 },
202
	.m1 = { .min = 8, .max = 18 },
224
	.m2 = { .min = 3, .max = 7 },
203
	.m2 = { .min = 3, .max = 7 },
225
        .p = { .min = 7, .max = 98 },
204
        .p = { .min = 7, .max = 98 },
226
        .p1 = { .min = 1, .max = 8 },
205
        .p1 = { .min = 1, .max = 8 },
227
	.p2 = { .dot_limit = 112000,
206
	.p2 = { .dot_limit = 112000,
228
		.p2_slow = 14, .p2_fast = 7 },
207
		.p2_slow = 14, .p2_fast = 7 },
229
};
208
};
230
 
209
 
231
 
210
 
232
static const intel_limit_t intel_limits_g4x_sdvo = {
211
static const intel_limit_t intel_limits_g4x_sdvo = {
233
	.dot = { .min = 25000, .max = 270000 },
212
	.dot = { .min = 25000, .max = 270000 },
234
	.vco = { .min = 1750000, .max = 3500000},
213
	.vco = { .min = 1750000, .max = 3500000},
235
	.n = { .min = 1, .max = 4 },
214
	.n = { .min = 1, .max = 4 },
236
	.m = { .min = 104, .max = 138 },
215
	.m = { .min = 104, .max = 138 },
237
	.m1 = { .min = 17, .max = 23 },
216
	.m1 = { .min = 17, .max = 23 },
238
	.m2 = { .min = 5, .max = 11 },
217
	.m2 = { .min = 5, .max = 11 },
239
	.p = { .min = 10, .max = 30 },
218
	.p = { .min = 10, .max = 30 },
240
	.p1 = { .min = 1, .max = 3},
219
	.p1 = { .min = 1, .max = 3},
241
	.p2 = { .dot_limit = 270000,
220
	.p2 = { .dot_limit = 270000,
242
		.p2_slow = 10,
221
		.p2_slow = 10,
243
		.p2_fast = 10
222
		.p2_fast = 10
244
	},
223
	},
245
};
224
};
246
 
225
 
247
static const intel_limit_t intel_limits_g4x_hdmi = {
226
static const intel_limit_t intel_limits_g4x_hdmi = {
248
	.dot = { .min = 22000, .max = 400000 },
227
	.dot = { .min = 22000, .max = 400000 },
249
	.vco = { .min = 1750000, .max = 3500000},
228
	.vco = { .min = 1750000, .max = 3500000},
250
	.n = { .min = 1, .max = 4 },
229
	.n = { .min = 1, .max = 4 },
251
	.m = { .min = 104, .max = 138 },
230
	.m = { .min = 104, .max = 138 },
252
	.m1 = { .min = 16, .max = 23 },
231
	.m1 = { .min = 16, .max = 23 },
253
	.m2 = { .min = 5, .max = 11 },
232
	.m2 = { .min = 5, .max = 11 },
254
	.p = { .min = 5, .max = 80 },
233
	.p = { .min = 5, .max = 80 },
255
	.p1 = { .min = 1, .max = 8},
234
	.p1 = { .min = 1, .max = 8},
256
	.p2 = { .dot_limit = 165000,
235
	.p2 = { .dot_limit = 165000,
257
		.p2_slow = 10, .p2_fast = 5 },
236
		.p2_slow = 10, .p2_fast = 5 },
258
};
237
};
259
 
238
 
260
static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
239
static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
261
	.dot = { .min = 20000, .max = 115000 },
240
	.dot = { .min = 20000, .max = 115000 },
262
	.vco = { .min = 1750000, .max = 3500000 },
241
	.vco = { .min = 1750000, .max = 3500000 },
263
	.n = { .min = 1, .max = 3 },
242
	.n = { .min = 1, .max = 3 },
264
	.m = { .min = 104, .max = 138 },
243
	.m = { .min = 104, .max = 138 },
265
	.m1 = { .min = 17, .max = 23 },
244
	.m1 = { .min = 17, .max = 23 },
266
	.m2 = { .min = 5, .max = 11 },
245
	.m2 = { .min = 5, .max = 11 },
267
	.p = { .min = 28, .max = 112 },
246
	.p = { .min = 28, .max = 112 },
268
	.p1 = { .min = 2, .max = 8 },
247
	.p1 = { .min = 2, .max = 8 },
269
	.p2 = { .dot_limit = 0,
248
	.p2 = { .dot_limit = 0,
270
		.p2_slow = 14, .p2_fast = 14
249
		.p2_slow = 14, .p2_fast = 14
271
	},
250
	},
272
};
251
};
273
 
252
 
274
static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
253
static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
275
	.dot = { .min = 80000, .max = 224000 },
254
	.dot = { .min = 80000, .max = 224000 },
276
	.vco = { .min = 1750000, .max = 3500000 },
255
	.vco = { .min = 1750000, .max = 3500000 },
277
	.n = { .min = 1, .max = 3 },
256
	.n = { .min = 1, .max = 3 },
278
	.m = { .min = 104, .max = 138 },
257
	.m = { .min = 104, .max = 138 },
279
	.m1 = { .min = 17, .max = 23 },
258
	.m1 = { .min = 17, .max = 23 },
280
	.m2 = { .min = 5, .max = 11 },
259
	.m2 = { .min = 5, .max = 11 },
281
	.p = { .min = 14, .max = 42 },
260
	.p = { .min = 14, .max = 42 },
282
	.p1 = { .min = 2, .max = 6 },
261
	.p1 = { .min = 2, .max = 6 },
283
	.p2 = { .dot_limit = 0,
262
	.p2 = { .dot_limit = 0,
284
		.p2_slow = 7, .p2_fast = 7
263
		.p2_slow = 7, .p2_fast = 7
285
	},
264
	},
286
};
265
};
287
 
266
 
288
static const intel_limit_t intel_limits_pineview_sdvo = {
267
static const intel_limit_t intel_limits_pineview_sdvo = {
289
        .dot = { .min = 20000, .max = 400000},
268
        .dot = { .min = 20000, .max = 400000},
290
        .vco = { .min = 1700000, .max = 3500000 },
269
        .vco = { .min = 1700000, .max = 3500000 },
291
	/* Pineview's Ncounter is a ring counter */
270
	/* Pineview's Ncounter is a ring counter */
292
        .n = { .min = 3, .max = 6 },
271
        .n = { .min = 3, .max = 6 },
293
        .m = { .min = 2, .max = 256 },
272
        .m = { .min = 2, .max = 256 },
294
	/* Pineview only has one combined m divider, which we treat as m2. */
273
	/* Pineview only has one combined m divider, which we treat as m2. */
295
        .m1 = { .min = 0, .max = 0 },
274
        .m1 = { .min = 0, .max = 0 },
296
        .m2 = { .min = 0, .max = 254 },
275
        .m2 = { .min = 0, .max = 254 },
297
        .p = { .min = 5, .max = 80 },
276
        .p = { .min = 5, .max = 80 },
298
        .p1 = { .min = 1, .max = 8 },
277
        .p1 = { .min = 1, .max = 8 },
299
	.p2 = { .dot_limit = 200000,
278
	.p2 = { .dot_limit = 200000,
300
		.p2_slow = 10, .p2_fast = 5 },
279
		.p2_slow = 10, .p2_fast = 5 },
301
};
280
};
302
 
281
 
303
static const intel_limit_t intel_limits_pineview_lvds = {
282
static const intel_limit_t intel_limits_pineview_lvds = {
304
        .dot = { .min = 20000, .max = 400000 },
283
        .dot = { .min = 20000, .max = 400000 },
305
        .vco = { .min = 1700000, .max = 3500000 },
284
        .vco = { .min = 1700000, .max = 3500000 },
306
        .n = { .min = 3, .max = 6 },
285
        .n = { .min = 3, .max = 6 },
307
        .m = { .min = 2, .max = 256 },
286
        .m = { .min = 2, .max = 256 },
308
        .m1 = { .min = 0, .max = 0 },
287
        .m1 = { .min = 0, .max = 0 },
309
        .m2 = { .min = 0, .max = 254 },
288
        .m2 = { .min = 0, .max = 254 },
310
        .p = { .min = 7, .max = 112 },
289
        .p = { .min = 7, .max = 112 },
311
        .p1 = { .min = 1, .max = 8 },
290
        .p1 = { .min = 1, .max = 8 },
312
	.p2 = { .dot_limit = 112000,
291
	.p2 = { .dot_limit = 112000,
313
		.p2_slow = 14, .p2_fast = 14 },
292
		.p2_slow = 14, .p2_fast = 14 },
314
};
293
};
315
 
294
 
316
/* Ironlake / Sandybridge
295
/* Ironlake / Sandybridge
317
 *
296
 *
318
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
297
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
319
 * the range value for them is (actual_value - 2).
298
 * the range value for them is (actual_value - 2).
320
 */
299
 */
321
static const intel_limit_t intel_limits_ironlake_dac = {
300
static const intel_limit_t intel_limits_ironlake_dac = {
322
	.dot = { .min = 25000, .max = 350000 },
301
	.dot = { .min = 25000, .max = 350000 },
323
	.vco = { .min = 1760000, .max = 3510000 },
302
	.vco = { .min = 1760000, .max = 3510000 },
324
	.n = { .min = 1, .max = 5 },
303
	.n = { .min = 1, .max = 5 },
325
	.m = { .min = 79, .max = 127 },
304
	.m = { .min = 79, .max = 127 },
326
	.m1 = { .min = 12, .max = 22 },
305
	.m1 = { .min = 12, .max = 22 },
327
	.m2 = { .min = 5, .max = 9 },
306
	.m2 = { .min = 5, .max = 9 },
328
	.p = { .min = 5, .max = 80 },
307
	.p = { .min = 5, .max = 80 },
329
	.p1 = { .min = 1, .max = 8 },
308
	.p1 = { .min = 1, .max = 8 },
330
	.p2 = { .dot_limit = 225000,
309
	.p2 = { .dot_limit = 225000,
331
		.p2_slow = 10, .p2_fast = 5 },
310
		.p2_slow = 10, .p2_fast = 5 },
332
};
311
};
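
As the comment above the Ironlake/Sandybridge tables notes, the stored N/M1/M2 ranges are register values, two less than the real divisors; the +2 is added back when the clock is computed (see i9xx_dpll_compute_m() and i9xx_clock() further down). A standalone illustration of that convention, using the minima from intel_limits_ironlake_dac and an assumed 120 MHz reference clock (the refclk value is not taken from this file):

/* Standalone sketch, not driver code. */
#include <stdio.h>

int main(void)
{
	int refclk = 120000;                       /* kHz, assumed reference clock */
	int n_reg = 1, m1_reg = 12, m2_reg = 5;    /* register-value minima from the table above */

	int m   = 5 * (m1_reg + 2) + (m2_reg + 2); /* 77: same formula as i9xx_dpll_compute_m() */
	int vco = refclk * m / (n_reg + 2);        /* 120000 * 77 / 3 = 3080000 kHz */

	/* 3080000 kHz falls inside the table's VCO window of 1760000..3510000 kHz */
	printf("effective m = %d, vco = %d kHz\n", m, vco);
	return 0;
}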
 
 static const intel_limit_t intel_limits_ironlake_single_lvds = {
 	.dot = { .min = 25000, .max = 350000 },
 	.vco = { .min = 1760000, .max = 3510000 },
 	.n = { .min = 1, .max = 3 },
 	.m = { .min = 79, .max = 118 },
 	.m1 = { .min = 12, .max = 22 },
 	.m2 = { .min = 5, .max = 9 },
 	.p = { .min = 28, .max = 112 },
 	.p1 = { .min = 2, .max = 8 },
 	.p2 = { .dot_limit = 225000,
 		.p2_slow = 14, .p2_fast = 14 },
 };
 
 static const intel_limit_t intel_limits_ironlake_dual_lvds = {
 	.dot = { .min = 25000, .max = 350000 },
 	.vco = { .min = 1760000, .max = 3510000 },
 	.n = { .min = 1, .max = 3 },
 	.m = { .min = 79, .max = 127 },
 	.m1 = { .min = 12, .max = 22 },
 	.m2 = { .min = 5, .max = 9 },
 	.p = { .min = 14, .max = 56 },
 	.p1 = { .min = 2, .max = 8 },
 	.p2 = { .dot_limit = 225000,
 		.p2_slow = 7, .p2_fast = 7 },
 };
 
 /* LVDS 100mhz refclk limits. */
 static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
 	.dot = { .min = 25000, .max = 350000 },
 	.vco = { .min = 1760000, .max = 3510000 },
 	.n = { .min = 1, .max = 2 },
 	.m = { .min = 79, .max = 126 },
 	.m1 = { .min = 12, .max = 22 },
 	.m2 = { .min = 5, .max = 9 },
 	.p = { .min = 28, .max = 112 },
 	.p1 = { .min = 2, .max = 8 },
 	.p2 = { .dot_limit = 225000,
 		.p2_slow = 14, .p2_fast = 14 },
 };
 
 static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
 	.dot = { .min = 25000, .max = 350000 },
 	.vco = { .min = 1760000, .max = 3510000 },
 	.n = { .min = 1, .max = 3 },
 	.m = { .min = 79, .max = 126 },
 	.m1 = { .min = 12, .max = 22 },
 	.m2 = { .min = 5, .max = 9 },
 	.p = { .min = 14, .max = 42 },
 	.p1 = { .min = 2, .max = 6 },
 	.p2 = { .dot_limit = 225000,
 		.p2_slow = 7, .p2_fast = 7 },
 };
 
 static const intel_limit_t intel_limits_vlv = {
 	 /*
 	  * These are the data rate limits (measured in fast clocks)
 	  * since those are the strictest limits we have. The fast
 	  * clock and actual rate limits are more relaxed, so checking
 	  * them would make no difference.
 	  */
 	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
 	.vco = { .min = 4000000, .max = 6000000 },
 	.n = { .min = 1, .max = 7 },
 	.m1 = { .min = 2, .max = 3 },
 	.m2 = { .min = 11, .max = 156 },
 	.p1 = { .min = 2, .max = 3 },
 	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
 };
 
 static const intel_limit_t intel_limits_chv = {
 	/*
 	 * These are the data rate limits (measured in fast clocks)
 	 * since those are the strictest limits we have.  The fast
 	 * clock and actual rate limits are more relaxed, so checking
 	 * them would make no difference.
 	 */
 	.dot = { .min = 25000 * 5, .max = 540000 * 5},
 	.vco = { .min = 4860000, .max = 6700000 },
 	.n = { .min = 1, .max = 1 },
 	.m1 = { .min = 2, .max = 2 },
 	.m2 = { .min = 24 << 22, .max = 175 << 22 },
 	.p1 = { .min = 2, .max = 4 },
 	.p2 = {	.p2_slow = 1, .p2_fast = 14 },
 };
 
 static void vlv_clock(int refclk, intel_clock_t *clock)
 {
 	clock->m = clock->m1 * clock->m2;
 	clock->p = clock->p1 * clock->p2;
 	if (WARN_ON(clock->n == 0 || clock->p == 0))
 		return;
 	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
 	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
 }
 
 /**
  * Returns whether any output on the specified pipe is of the specified type
  */
-static bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
+bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type)
 {
-	struct drm_device *dev = crtc->dev;
+	struct drm_device *dev = crtc->base.dev;
 	struct intel_encoder *encoder;
 
-	for_each_encoder_on_crtc(dev, crtc, encoder)
+	for_each_encoder_on_crtc(dev, &crtc->base, encoder)
 		if (encoder->type == type)
 			return true;
 
 	return false;
 }
+
+/**
+ * Returns whether any output on the specified pipe will have the specified
+ * type after a staged modeset is complete, i.e., the same as
+ * intel_pipe_has_type() but looking at encoder->new_crtc instead of
+ * encoder->crtc.
+ */
+static bool intel_pipe_will_have_type(struct intel_crtc *crtc, int type)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct intel_encoder *encoder;
+
+	for_each_intel_encoder(dev, encoder)
+		if (encoder->new_crtc == crtc && encoder->type == type)
+			return true;
+
+	return false;
+}
 
-static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
+static const intel_limit_t *intel_ironlake_limit(struct intel_crtc *crtc,
 						int refclk)
 {
-	struct drm_device *dev = crtc->dev;
+	struct drm_device *dev = crtc->base.dev;
 	const intel_limit_t *limit;
 
-	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+	if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
 		if (intel_is_dual_link_lvds(dev)) {
 			if (refclk == 100000)
 				limit = &intel_limits_ironlake_dual_lvds_100m;
 			else
 				limit = &intel_limits_ironlake_dual_lvds;
 		} else {
 			if (refclk == 100000)
 				limit = &intel_limits_ironlake_single_lvds_100m;
 			else
 				limit = &intel_limits_ironlake_single_lvds;
 		}
 	} else
 		limit = &intel_limits_ironlake_dac;
 
 	return limit;
 }
 
-static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
+static const intel_limit_t *intel_g4x_limit(struct intel_crtc *crtc)
 {
-	struct drm_device *dev = crtc->dev;
+	struct drm_device *dev = crtc->base.dev;
 	const intel_limit_t *limit;
 
-	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+	if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
 		if (intel_is_dual_link_lvds(dev))
 			limit = &intel_limits_g4x_dual_channel_lvds;
 		else
 			limit = &intel_limits_g4x_single_channel_lvds;
-	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
-		   intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
+	} else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_HDMI) ||
+		   intel_pipe_will_have_type(crtc, INTEL_OUTPUT_ANALOG)) {
 		limit = &intel_limits_g4x_hdmi;
-	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
+	} else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_SDVO)) {
 		limit = &intel_limits_g4x_sdvo;
 	} else /* The option is for other outputs */
 		limit = &intel_limits_i9xx_sdvo;
 
 	return limit;
 }
 
-static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
+static const intel_limit_t *intel_limit(struct intel_crtc *crtc, int refclk)
 {
-	struct drm_device *dev = crtc->dev;
+	struct drm_device *dev = crtc->base.dev;
 	const intel_limit_t *limit;
 
 	if (HAS_PCH_SPLIT(dev))
 		limit = intel_ironlake_limit(crtc, refclk);
 	else if (IS_G4X(dev)) {
 		limit = intel_g4x_limit(crtc);
 	} else if (IS_PINEVIEW(dev)) {
-		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+		if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
 			limit = &intel_limits_pineview_lvds;
 		else
 			limit = &intel_limits_pineview_sdvo;
 	} else if (IS_CHERRYVIEW(dev)) {
 		limit = &intel_limits_chv;
 	} else if (IS_VALLEYVIEW(dev)) {
 		limit = &intel_limits_vlv;
 	} else if (!IS_GEN2(dev)) {
-		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+		if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
 			limit = &intel_limits_i9xx_lvds;
 		else
 			limit = &intel_limits_i9xx_sdvo;
 	} else {
-		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+		if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
 			limit = &intel_limits_i8xx_lvds;
-		else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO))
+		else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_DVO))
 			limit = &intel_limits_i8xx_dvo;
 		else
 			limit = &intel_limits_i8xx_dac;
 	}
 	return limit;
 }
 
 /* m1 is reserved as 0 in Pineview, n is a ring counter */
 static void pineview_clock(int refclk, intel_clock_t *clock)
 {
 	clock->m = clock->m2 + 2;
 	clock->p = clock->p1 * clock->p2;
 	if (WARN_ON(clock->n == 0 || clock->p == 0))
 		return;
 	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
 	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
 }
 
 static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
 {
 	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
 }
 
 static void i9xx_clock(int refclk, intel_clock_t *clock)
 {
 	clock->m = i9xx_dpll_compute_m(clock);
 	clock->p = clock->p1 * clock->p2;
 	if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
 		return;
 	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
 	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
 }
 
 static void chv_clock(int refclk, intel_clock_t *clock)
 {
 	clock->m = clock->m1 * clock->m2;
 	clock->p = clock->p1 * clock->p2;
 	if (WARN_ON(clock->n == 0 || clock->p == 0))
 		return;
 	clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
 			clock->n << 22);
 	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
 }
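
These helpers all follow the same model: an effective multiplier m and post divider p are built from the table divisors, the VCO runs at refclk * m / n, and the dot clock is vco / p. On CHV the divisor by n is shifted left by 22 bits, which is consistent with m2 being carried as a fixed-point value (compare the 24 << 22 .. 175 << 22 range in intel_limits_chv). A standalone worked example of the i9xx variant; the divisor set is chosen to sit inside intel_limits_i9xx_sdvo, and the 96000 kHz reference clock is an assumption for the example:

/* Standalone sketch, not driver code. Plain integer division stands in
 * for DIV_ROUND_CLOSEST; the sample values divide exactly. */
#include <stdio.h>

int main(void)
{
	int refclk = 96000;              /* kHz, assumed reference clock */
	int n = 2, m1 = 12, m2 = 7;      /* register values; actual divisor is value + 2 */
	int p1 = 2, p2 = 10;

	int m   = 5 * (m1 + 2) + (m2 + 2);   /* 79, as in i9xx_dpll_compute_m() */
	int vco = refclk * m / (n + 2);      /* 1896000 kHz, inside the i9xx VCO window */
	int p   = p1 * p2;                   /* 20 */
	int dot = vco / p;                   /* 94800 kHz, i.e. a 94.8 MHz pixel clock */

	printf("m=%d vco=%d kHz dot=%d kHz\n", m, vco, dot);
	return 0;
}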
 
 #define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
 /**
  * Returns whether the given set of divisors are valid for a given refclk with
  * the given connectors.
  */
 
 static bool intel_PLL_is_valid(struct drm_device *dev,
 			       const intel_limit_t *limit,
 			       const intel_clock_t *clock)
 {
 	if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
 		INTELPllInvalid("n out of range\n");
 	if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
 		INTELPllInvalid("p1 out of range\n");
 	if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
 		INTELPllInvalid("m2 out of range\n");
 	if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
 		INTELPllInvalid("m1 out of range\n");
 
 	if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev))
 		if (clock->m1 <= clock->m2)
 		INTELPllInvalid("m1 <= m2\n");
 
 	if (!IS_VALLEYVIEW(dev)) {
 		if (clock->p < limit->p.min || limit->p.max < clock->p)
 			INTELPllInvalid("p out of range\n");
 	if (clock->m   < limit->m.min   || limit->m.max   < clock->m)
 		INTELPllInvalid("m out of range\n");
 	}
 
 	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
 		INTELPllInvalid("vco out of range\n");
 	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
 	 * connector, etc., rather than just a single range.
 	 */
 	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
 		INTELPllInvalid("dot out of range\n");
 
 	return true;
 }
 
 static bool
-i9xx_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
+i9xx_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
 		    int target, int refclk, intel_clock_t *match_clock,
 		    intel_clock_t *best_clock)
 {
-	struct drm_device *dev = crtc->dev;
+	struct drm_device *dev = crtc->base.dev;
 	intel_clock_t clock;
 	int err = target;
 
-	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+	if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
 		/*
 		 * For LVDS just rely on its current settings for dual-channel.
 		 * We haven't figured out how to reliably set up different
 		 * single/dual channel state, if we even can.
 		 */
 		if (intel_is_dual_link_lvds(dev))
 			clock.p2 = limit->p2.p2_fast;
 		else
 			clock.p2 = limit->p2.p2_slow;
 	} else {
 		if (target < limit->p2.dot_limit)
 			clock.p2 = limit->p2.p2_slow;
 		else
 			clock.p2 = limit->p2.p2_fast;
 	}
 
 	memset(best_clock, 0, sizeof(*best_clock));
 
 	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
 	     clock.m1++) {
 		for (clock.m2 = limit->m2.min;
 		     clock.m2 <= limit->m2.max; clock.m2++) {
 			if (clock.m2 >= clock.m1)
 				break;
 			for (clock.n = limit->n.min;
 			     clock.n <= limit->n.max; clock.n++) {
 				for (clock.p1 = limit->p1.min;
 					clock.p1 <= limit->p1.max; clock.p1++) {
 					int this_err;
 
 					i9xx_clock(refclk, &clock);
 					if (!intel_PLL_is_valid(dev, limit,
 								&clock))
 						continue;
 					if (match_clock &&
 					    clock.p != match_clock->p)
 						continue;
 
 					this_err = abs(clock.dot - target);
 					if (this_err < err) {
 						*best_clock = clock;
 						err = this_err;
 					}
 				}
 			}
 		}
 	}
 
 	return (err != target);
 }
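
i9xx_find_best_dpll() simply walks every allowed (m1, m2, n, p1) combination, fixes p2 from the dot_limit rule, rejects candidates via intel_PLL_is_valid(), and keeps the candidate whose computed dot clock is closest to the target. A standalone, simplified restatement of the same search against the intel_limits_i8xx_dac ranges above; the 48000 kHz reference clock and 108000 kHz target are assumptions chosen for the example, and only the VCO window is checked here:

/* Standalone sketch, not driver code. For these inputs an exact solution
 * exists (m1=18, m2=6, n=2, p1=3 gives dot = 108000 kHz). */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const int refclk = 48000, target = 108000;           /* kHz */
	const int p2 = (target < 165000) ? 4 : 2;             /* .p2.dot_limit rule */
	int best_err = target, best_dot = 0;
	int m1, m2, n, p1;

	for (m1 = 18; m1 <= 26; m1++)                         /* ranges from intel_limits_i8xx_dac */
		for (m2 = 6; m2 <= 16 && m2 < m1; m2++)
			for (n = 2; n <= 16; n++)
				for (p1 = 2; p1 <= 33; p1++) {
					int m   = 5 * (m1 + 2) + (m2 + 2);
					int vco = refclk * m / (n + 2);
					int dot = vco / (p1 * p2);

					if (vco < 908000 || vco > 1512000)
						continue;
					if (abs(dot - target) < best_err) {
						best_err = abs(dot - target);
						best_dot = dot;
					}
				}

	printf("closest dot clock: %d kHz (error %d kHz)\n", best_dot, best_err);
	return 0;
}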
 
 static bool
-pnv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
+pnv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
 		   int target, int refclk, intel_clock_t *match_clock,
 		   intel_clock_t *best_clock)
 {
-	struct drm_device *dev = crtc->dev;
+	struct drm_device *dev = crtc->base.dev;
 	intel_clock_t clock;
 	int err = target;
 
-	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+	if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
 		/*
 		 * For LVDS just rely on its current settings for dual-channel.
 		 * We haven't figured out how to reliably set up different
 		 * single/dual channel state, if we even can.
 		 */
 		if (intel_is_dual_link_lvds(dev))
 			clock.p2 = limit->p2.p2_fast;
 		else
 			clock.p2 = limit->p2.p2_slow;
 	} else {
 		if (target < limit->p2.dot_limit)
 			clock.p2 = limit->p2.p2_slow;
 		else
 			clock.p2 = limit->p2.p2_fast;
 	}
 
 	memset(best_clock, 0, sizeof(*best_clock));
 
 	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
 	     clock.m1++) {
 		for (clock.m2 = limit->m2.min;
 		     clock.m2 <= limit->m2.max; clock.m2++) {
 			for (clock.n = limit->n.min;
 			     clock.n <= limit->n.max; clock.n++) {
 				for (clock.p1 = limit->p1.min;
 					clock.p1 <= limit->p1.max; clock.p1++) {
 					int this_err;
 
 					pineview_clock(refclk, &clock);
 					if (!intel_PLL_is_valid(dev, limit,
 								&clock))
 						continue;
 					if (match_clock &&
 					    clock.p != match_clock->p)
 						continue;
 
 					this_err = abs(clock.dot - target);
 					if (this_err < err) {
 						*best_clock = clock;
 						err = this_err;
 					}
 				}
 			}
 		}
 	}
 
 	return (err != target);
 }
 
 static bool
-g4x_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
+g4x_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
 			int target, int refclk, intel_clock_t *match_clock,
 			intel_clock_t *best_clock)
 {
-	struct drm_device *dev = crtc->dev;
+	struct drm_device *dev = crtc->base.dev;
 	intel_clock_t clock;
 	int max_n;
 	bool found;
 	/* approximately equals target * 0.00585 */
 	int err_most = (target >> 8) + (target >> 9);
 	found = false;
 
-	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+	if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
 		if (intel_is_dual_link_lvds(dev))
 			clock.p2 = limit->p2.p2_fast;
 		else
 			clock.p2 = limit->p2.p2_slow;
 	} else {
 		if (target < limit->p2.dot_limit)
 			clock.p2 = limit->p2.p2_slow;
 		else
 			clock.p2 = limit->p2.p2_fast;
 	}
 
 	memset(best_clock, 0, sizeof(*best_clock));
 	max_n = limit->n.max;
 	/* based on hardware requirement, prefer smaller n to precision */
 	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
 		/* based on hardware requirement, prefere larger m1,m2 */
 		for (clock.m1 = limit->m1.max;
 		     clock.m1 >= limit->m1.min; clock.m1--) {
 			for (clock.m2 = limit->m2.max;
 			     clock.m2 >= limit->m2.min; clock.m2--) {
 				for (clock.p1 = limit->p1.max;
 				     clock.p1 >= limit->p1.min; clock.p1--) {
 					int this_err;
 
 					i9xx_clock(refclk, &clock);
 					if (!intel_PLL_is_valid(dev, limit,
 								&clock))
 						continue;
 
 					this_err = abs(clock.dot - target);
 					if (this_err < err_most) {
 						*best_clock = clock;
 						err_most = this_err;
 						max_n = clock.n;
 						found = true;
 					}
 				}
 			}
 		}
 	}
 	return found;
 }
 
 static bool
-vlv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
+vlv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
 			int target, int refclk, intel_clock_t *match_clock,
 			intel_clock_t *best_clock)
 {
-	struct drm_device *dev = crtc->dev;
+	struct drm_device *dev = crtc->base.dev;
 	intel_clock_t clock;
 	unsigned int bestppm = 1000000;
 	/* min update 19.2 MHz */
 	int max_n = min(limit->n.max, refclk / 19200);
 	bool found = false;
 
 	target *= 5; /* fast clock */
 
 	memset(best_clock, 0, sizeof(*best_clock));
 
 	/* based on hardware requirement, prefer smaller n to precision */
 	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
 		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
 			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
 			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
 				clock.p = clock.p1 * clock.p2;
 				/* based on hardware requirement, prefer bigger m1,m2 values */
 				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
 					unsigned int ppm, diff;
 
 					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
 								     refclk * clock.m1);
 
 					vlv_clock(refclk, &clock);
 
 					if (!intel_PLL_is_valid(dev, limit,
 								&clock))
 						continue;
 
 					diff = abs(clock.dot - target);
 					ppm = div_u64(1000000ULL * diff, target);
 
 					if (ppm < 100 && clock.p > best_clock->p) {
 							bestppm = 0;
 						*best_clock = clock;
 						found = true;
 						}
 
 					if (bestppm >= 10 && ppm < bestppm - 10) {
 						bestppm = ppm;
 						*best_clock = clock;
 						found = true;
 						}
 						}
 					}
 				}
 			}
 
 	return found;
 }
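
vlv_find_best_dpll() scores candidates in parts per million of the target fast clock: a candidate within 100 ppm is taken whenever it offers a larger post divider, otherwise it must improve on the best ppm seen so far by more than 10. A standalone sketch of that error metric (the sample numbers are assumptions chosen so the result is exactly 100 ppm):

/* Standalone sketch, not driver code. */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

int main(void)
{
	int64_t target = 270000 * 5;            /* fast clock, as the driver scales it */
	int64_t dot    = target + 135;          /* 135 / 1350000 = 100 ppm off */
	int64_t ppm    = (1000000LL * llabs(dot - target)) / target;

	printf("ppm = %lld\n", (long long)ppm); /* prints 100 */
	return 0;
}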
834
 
831
 
static bool
-chv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
+chv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
		   int target, int refclk, intel_clock_t *match_clock,
		   intel_clock_t *best_clock)
{
-	struct drm_device *dev = crtc->dev;
+	struct drm_device *dev = crtc->base.dev;
	intel_clock_t clock;
	uint64_t m2;
	int found = false;

	memset(best_clock, 0, sizeof(*best_clock));

	/*
	 * Based on hardware doc, the n always set to 1, and m1 always
	 * set to 2.  If requires to support 200Mhz refclk, we need to
	 * revisit this because n may not 1 anymore.
	 */
	clock.n = 1, clock.m1 = 2;
	target *= 5;	/* fast clock */

	for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
		for (clock.p2 = limit->p2.p2_fast;
				clock.p2 >= limit->p2.p2_slow;
				clock.p2 -= clock.p2 > 10 ? 2 : 1) {

			clock.p = clock.p1 * clock.p2;

			m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p *
					clock.n) << 22, refclk * clock.m1);

			if (m2 > INT_MAX/clock.m1)
				continue;

			clock.m2 = m2;

			chv_clock(refclk, &clock);

			if (!intel_PLL_is_valid(dev, limit, &clock))
				continue;

			/* based on hardware requirement, prefer bigger p
			 */
			if (clock.p > best_clock->p) {
				*best_clock = clock;
				found = true;
			}
		}
	}

	return found;
}

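The "<< 22" above is the whole trick: CHV's m2 divider is a fixed-point value with 22 fractional bits, so it is solved for directly instead of being swept like m1/m2 on VLV. A minimal sketch of that computation, assuming kHz units and with an invented helper name:

#include <stdint.h>

/* Solve target ~= refclk * m1 * (m2 / 2^22) / (n * p) for the fixed-point m2,
 * rounding to the nearest representable value. */
static uint64_t chv_m2_fixed(uint64_t target_khz, uint32_t p, uint32_t n,
			     uint32_t refclk_khz, uint32_t m1)
{
	uint64_t num = (target_khz * p * n) << 22;	/* 22 fractional bits */
	uint64_t den = (uint64_t)refclk_khz * m1;

	return (num + den / 2) / den;
}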
bool intel_crtc_active(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	/* Be paranoid as we can arrive here with only partial
	 * state retrieved from the hardware during setup.
	 *
	 * We can ditch the adjusted_mode.crtc_clock check as soon
	 * as Haswell has gained clock readout/fastboot support.
	 *
	 * We can ditch the crtc->primary->fb check as soon as we can
	 * properly reconstruct framebuffers.
	 */
	return intel_crtc->active && crtc->primary->fb &&
		intel_crtc->config.adjusted_mode.crtc_clock;
}

enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
					     enum pipe pipe)
{
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	return intel_crtc->config.cpu_transcoder;
}
-
-static void g4x_wait_for_vblank(struct drm_device *dev, int pipe)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 frame, frame_reg = PIPE_FRMCOUNT_GM45(pipe);
-
-	frame = I915_READ(frame_reg);
-
-	if (wait_for(I915_READ_NOTRACE(frame_reg) != frame, 50))
-		WARN(1, "vblank wait timed out\n");
-}
-
-/**
- * intel_wait_for_vblank - wait for vblank on a given pipe
- * @dev: drm device
- * @pipe: pipe to wait for
- *
- * Wait for vblank to occur on a given pipe.  Needed for various bits of
- * mode setting code.
- */
-void intel_wait_for_vblank(struct drm_device *dev, int pipe)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int pipestat_reg = PIPESTAT(pipe);
-
-	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
-		g4x_wait_for_vblank(dev, pipe);
-		return;
-	}
-
-	/* Clear existing vblank status. Note this will clear any other
-	 * sticky status fields as well.
-	 *
-	 * This races with i915_driver_irq_handler() with the result
-	 * that either function could miss a vblank event.  Here it is not
-	 * fatal, as we will either wait upon the next vblank interrupt or
-	 * timeout.  Generally speaking intel_wait_for_vblank() is only
-	 * called during modeset at which time the GPU should be idle and
-	 * should *not* be performing page flips and thus not waiting on
-	 * vblanks...
-	 * Currently, the result of us stealing a vblank from the irq
-	 * handler is that a single frame will be skipped during swapbuffers.
-	 */
-	I915_WRITE(pipestat_reg,
-		   I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);
-
-	/* Wait for vblank interrupt bit to set */
-	if (wait_for(I915_READ(pipestat_reg) &
-		     PIPE_VBLANK_INTERRUPT_STATUS,
-		     50))
-		DRM_DEBUG_KMS("vblank wait timed out\n");
-}

static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg = PIPEDSL(pipe);
	u32 line1, line2;
	u32 line_mask;

	if (IS_GEN2(dev))
		line_mask = DSL_LINEMASK_GEN2;
	else
		line_mask = DSL_LINEMASK_GEN3;

	line1 = I915_READ(reg) & line_mask;
	mdelay(5);
	line2 = I915_READ(reg) & line_mask;

	return line1 == line2;
}

/*
 * intel_wait_for_pipe_off - wait for pipe to turn off
- * @dev: drm device
- * @pipe: pipe to wait for
+ * @crtc: crtc whose pipe to wait for
 *
 * After disabling a pipe, we can't wait for vblank in the usual way,
 * spinning on the vblank interrupt status bit, since we won't actually
 * see an interrupt when the pipe is disabled.
 *
 * On Gen4 and above:
 *   wait for the pipe register state bit to turn off
 *
 * Otherwise:
 *   wait for the display line value to settle (it usually
 *   ends up stopping at the start of the next frame).
 *
 */
-void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
+static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
{
+	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
-	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
-								      pipe);
+	enum transcoder cpu_transcoder = crtc->config.cpu_transcoder;
+	enum pipe pipe = crtc->pipe;

	if (INTEL_INFO(dev)->gen >= 4) {
		int reg = PIPECONF(cpu_transcoder);

		/* Wait for the Pipe State to go off */
		if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
			     100))
			WARN(1, "pipe_off wait timed out\n");
	} else {
		/* Wait for the display line to settle */
		if (wait_for(pipe_dsl_stopped(dev, pipe), 100))
			WARN(1, "pipe_off wait timed out\n");
	}
}

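Both wait paths above lean on the driver's wait_for(COND, timeout_ms) macro, which simply re-evaluates a condition until it holds or the timeout expires. A user-space flavoured equivalent, purely for illustration (the real macro is defined elsewhere in the driver and handles the waiting differently):

#include <stdbool.h>
#include <time.h>

/* Poll cond(ctx) roughly once per millisecond; give up after timeout_ms. */
static bool poll_until(bool (*cond)(void *ctx), void *ctx,
		       unsigned int timeout_ms)
{
	struct timespec one_ms = { 0, 1000000L };
	unsigned int elapsed;

	for (elapsed = 0; !cond(ctx); elapsed++) {
		if (elapsed >= timeout_ms)
			return false;	/* timed out, mirrors wait_for()'s failure path */
		nanosleep(&one_ms, NULL);
	}
	return true;
}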
1021
/*
966
/*
1022
 * ibx_digital_port_connected - is the specified port connected?
967
 * ibx_digital_port_connected - is the specified port connected?
1023
 * @dev_priv: i915 private structure
968
 * @dev_priv: i915 private structure
1024
 * @port: the port to test
969
 * @port: the port to test
1025
 *
970
 *
1026
 * Returns true if @port is connected, false otherwise.
971
 * Returns true if @port is connected, false otherwise.
1027
 */
972
 */
1028
bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
973
bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
1029
				struct intel_digital_port *port)
974
				struct intel_digital_port *port)
1030
{
975
{
1031
	u32 bit;
976
	u32 bit;
1032
 
977
 
1033
	if (HAS_PCH_IBX(dev_priv->dev)) {
978
	if (HAS_PCH_IBX(dev_priv->dev)) {
1034
		switch (port->port) {
979
		switch (port->port) {
1035
		case PORT_B:
980
		case PORT_B:
1036
			bit = SDE_PORTB_HOTPLUG;
981
			bit = SDE_PORTB_HOTPLUG;
1037
			break;
982
			break;
1038
		case PORT_C:
983
		case PORT_C:
1039
			bit = SDE_PORTC_HOTPLUG;
984
			bit = SDE_PORTC_HOTPLUG;
1040
			break;
985
			break;
1041
		case PORT_D:
986
		case PORT_D:
1042
			bit = SDE_PORTD_HOTPLUG;
987
			bit = SDE_PORTD_HOTPLUG;
1043
			break;
988
			break;
1044
		default:
989
		default:
1045
			return true;
990
			return true;
1046
		}
991
		}
1047
	} else {
992
	} else {
1048
		switch (port->port) {
993
		switch (port->port) {
1049
		case PORT_B:
994
		case PORT_B:
1050
			bit = SDE_PORTB_HOTPLUG_CPT;
995
			bit = SDE_PORTB_HOTPLUG_CPT;
1051
			break;
996
			break;
1052
		case PORT_C:
997
		case PORT_C:
1053
			bit = SDE_PORTC_HOTPLUG_CPT;
998
			bit = SDE_PORTC_HOTPLUG_CPT;
1054
			break;
999
			break;
1055
		case PORT_D:
1000
		case PORT_D:
1056
			bit = SDE_PORTD_HOTPLUG_CPT;
1001
			bit = SDE_PORTD_HOTPLUG_CPT;
1057
			break;
1002
			break;
1058
		default:
1003
		default:
1059
			return true;
1004
			return true;
1060
		}
1005
		}
1061
	}
1006
	}
1062
 
1007
 
1063
	return I915_READ(SDEISR) & bit;
1008
	return I915_READ(SDEISR) & bit;
1064
}
1009
}
1065
 
1010
 
1066
static const char *state_string(bool enabled)
1011
static const char *state_string(bool enabled)
1067
{
1012
{
1068
	return enabled ? "on" : "off";
1013
	return enabled ? "on" : "off";
1069
}
1014
}
1070
 
1015
 
1071
/* Only for pre-ILK configs */
1016
/* Only for pre-ILK configs */
1072
void assert_pll(struct drm_i915_private *dev_priv,
1017
void assert_pll(struct drm_i915_private *dev_priv,
1073
		       enum pipe pipe, bool state)
1018
		       enum pipe pipe, bool state)
1074
{
1019
{
1075
	int reg;
1020
	int reg;
1076
	u32 val;
1021
	u32 val;
1077
	bool cur_state;
1022
	bool cur_state;
1078
 
1023
 
1079
	reg = DPLL(pipe);
1024
	reg = DPLL(pipe);
1080
	val = I915_READ(reg);
1025
	val = I915_READ(reg);
1081
	cur_state = !!(val & DPLL_VCO_ENABLE);
1026
	cur_state = !!(val & DPLL_VCO_ENABLE);
1082
	WARN(cur_state != state,
1027
	WARN(cur_state != state,
1083
	     "PLL state assertion failure (expected %s, current %s)\n",
1028
	     "PLL state assertion failure (expected %s, current %s)\n",
1084
	     state_string(state), state_string(cur_state));
1029
	     state_string(state), state_string(cur_state));
1085
}
1030
}
1086
 
1031
 
1087
/* XXX: the dsi pll is shared between MIPI DSI ports */
1032
/* XXX: the dsi pll is shared between MIPI DSI ports */
1088
static void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
1033
static void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
1089
{
1034
{
1090
	u32 val;
1035
	u32 val;
1091
	bool cur_state;
1036
	bool cur_state;
1092
 
1037
 
1093
	mutex_lock(&dev_priv->dpio_lock);
1038
	mutex_lock(&dev_priv->dpio_lock);
1094
	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
1039
	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
1095
	mutex_unlock(&dev_priv->dpio_lock);
1040
	mutex_unlock(&dev_priv->dpio_lock);
1096
 
1041
 
1097
	cur_state = val & DSI_PLL_VCO_EN;
1042
	cur_state = val & DSI_PLL_VCO_EN;
1098
	WARN(cur_state != state,
1043
	WARN(cur_state != state,
1099
	     "DSI PLL state assertion failure (expected %s, current %s)\n",
1044
	     "DSI PLL state assertion failure (expected %s, current %s)\n",
1100
	     state_string(state), state_string(cur_state));
1045
	     state_string(state), state_string(cur_state));
1101
}
1046
}
1102
#define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true)
1047
#define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true)
1103
#define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false)
1048
#define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false)
1104
 
1049
 
1105
struct intel_shared_dpll *
1050
struct intel_shared_dpll *
1106
intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
1051
intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
1107
{
1052
{
1108
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
1053
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
1109
 
1054
 
1110
	if (crtc->config.shared_dpll < 0)
1055
	if (crtc->config.shared_dpll < 0)
1111
		return NULL;
1056
		return NULL;
1112
 
1057
 
1113
	return &dev_priv->shared_dplls[crtc->config.shared_dpll];
1058
	return &dev_priv->shared_dplls[crtc->config.shared_dpll];
1114
}
1059
}
1115
 
1060
 
1116
/* For ILK+ */
1061
/* For ILK+ */
1117
void assert_shared_dpll(struct drm_i915_private *dev_priv,
1062
void assert_shared_dpll(struct drm_i915_private *dev_priv,
1118
			       struct intel_shared_dpll *pll,
1063
			       struct intel_shared_dpll *pll,
1119
			   bool state)
1064
			   bool state)
1120
{
1065
{
1121
	bool cur_state;
1066
	bool cur_state;
1122
	struct intel_dpll_hw_state hw_state;
1067
	struct intel_dpll_hw_state hw_state;
1123
 
1068
 
1124
	if (WARN (!pll,
1069
	if (WARN (!pll,
1125
		  "asserting DPLL %s with no DPLL\n", state_string(state)))
1070
		  "asserting DPLL %s with no DPLL\n", state_string(state)))
1126
		return;
1071
		return;
1127
 
1072
 
1128
	cur_state = pll->get_hw_state(dev_priv, pll, &hw_state);
1073
	cur_state = pll->get_hw_state(dev_priv, pll, &hw_state);
1129
	WARN(cur_state != state,
1074
	WARN(cur_state != state,
1130
	     "%s assertion failure (expected %s, current %s)\n",
1075
	     "%s assertion failure (expected %s, current %s)\n",
1131
	     pll->name, state_string(state), state_string(cur_state));
1076
	     pll->name, state_string(state), state_string(cur_state));
1132
}
1077
}
1133
 
1078
 
1134
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
1079
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
1135
			  enum pipe pipe, bool state)
1080
			  enum pipe pipe, bool state)
1136
{
1081
{
1137
	int reg;
1082
	int reg;
1138
	u32 val;
1083
	u32 val;
1139
	bool cur_state;
1084
	bool cur_state;
1140
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1085
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1141
								      pipe);
1086
								      pipe);
1142
 
1087
 
1143
	if (HAS_DDI(dev_priv->dev)) {
1088
	if (HAS_DDI(dev_priv->dev)) {
1144
		/* DDI does not have a specific FDI_TX register */
1089
		/* DDI does not have a specific FDI_TX register */
1145
		reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
1090
		reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
1146
		val = I915_READ(reg);
1091
		val = I915_READ(reg);
1147
		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
1092
		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
1148
	} else {
1093
	} else {
1149
	reg = FDI_TX_CTL(pipe);
1094
	reg = FDI_TX_CTL(pipe);
1150
	val = I915_READ(reg);
1095
	val = I915_READ(reg);
1151
	cur_state = !!(val & FDI_TX_ENABLE);
1096
	cur_state = !!(val & FDI_TX_ENABLE);
1152
	}
1097
	}
1153
	WARN(cur_state != state,
1098
	WARN(cur_state != state,
1154
	     "FDI TX state assertion failure (expected %s, current %s)\n",
1099
	     "FDI TX state assertion failure (expected %s, current %s)\n",
1155
	     state_string(state), state_string(cur_state));
1100
	     state_string(state), state_string(cur_state));
1156
}
1101
}
1157
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
1102
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
1158
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1103
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1159
 
1104
 
1160
static void assert_fdi_rx(struct drm_i915_private *dev_priv,
1105
static void assert_fdi_rx(struct drm_i915_private *dev_priv,
1161
			  enum pipe pipe, bool state)
1106
			  enum pipe pipe, bool state)
1162
{
1107
{
1163
	int reg;
1108
	int reg;
1164
	u32 val;
1109
	u32 val;
1165
	bool cur_state;
1110
	bool cur_state;
1166
 
1111
 
1167
	reg = FDI_RX_CTL(pipe);
1112
	reg = FDI_RX_CTL(pipe);
1168
	val = I915_READ(reg);
1113
	val = I915_READ(reg);
1169
	cur_state = !!(val & FDI_RX_ENABLE);
1114
	cur_state = !!(val & FDI_RX_ENABLE);
1170
	WARN(cur_state != state,
1115
	WARN(cur_state != state,
1171
	     "FDI RX state assertion failure (expected %s, current %s)\n",
1116
	     "FDI RX state assertion failure (expected %s, current %s)\n",
1172
	     state_string(state), state_string(cur_state));
1117
	     state_string(state), state_string(cur_state));
1173
}
1118
}
1174
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
1119
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
1175
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1120
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1176
 
1121
 
1177
static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
1122
static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
1178
				      enum pipe pipe)
1123
				      enum pipe pipe)
1179
{
1124
{
1180
	int reg;
1125
	int reg;
1181
	u32 val;
1126
	u32 val;
1182
 
1127
 
1183
	/* ILK FDI PLL is always enabled */
1128
	/* ILK FDI PLL is always enabled */
1184
	if (INTEL_INFO(dev_priv->dev)->gen == 5)
1129
	if (INTEL_INFO(dev_priv->dev)->gen == 5)
1185
		return;
1130
		return;
1186
 
1131
 
1187
	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
1132
	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
1188
	if (HAS_DDI(dev_priv->dev))
1133
	if (HAS_DDI(dev_priv->dev))
1189
		return;
1134
		return;
1190
 
1135
 
1191
	reg = FDI_TX_CTL(pipe);
1136
	reg = FDI_TX_CTL(pipe);
1192
	val = I915_READ(reg);
1137
	val = I915_READ(reg);
1193
	WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
1138
	WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
1194
}
1139
}
1195
 
1140
 
1196
void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
1141
void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
1197
		       enum pipe pipe, bool state)
1142
		       enum pipe pipe, bool state)
1198
{
1143
{
1199
	int reg;
1144
	int reg;
1200
	u32 val;
1145
	u32 val;
1201
	bool cur_state;
1146
	bool cur_state;
1202
 
1147
 
1203
	reg = FDI_RX_CTL(pipe);
1148
	reg = FDI_RX_CTL(pipe);
1204
	val = I915_READ(reg);
1149
	val = I915_READ(reg);
1205
	cur_state = !!(val & FDI_RX_PLL_ENABLE);
1150
	cur_state = !!(val & FDI_RX_PLL_ENABLE);
1206
	WARN(cur_state != state,
1151
	WARN(cur_state != state,
1207
	     "FDI RX PLL assertion failure (expected %s, current %s)\n",
1152
	     "FDI RX PLL assertion failure (expected %s, current %s)\n",
1208
	     state_string(state), state_string(cur_state));
1153
	     state_string(state), state_string(cur_state));
1209
}
1154
}
1210
 
1155
 
-static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
+void assert_panel_unlocked(struct drm_i915_private *dev_priv,
				  enum pipe pipe)
{
+	struct drm_device *dev = dev_priv->dev;
-	int pp_reg, lvds_reg;
+	int pp_reg;
	u32 val;
	enum pipe panel_pipe = PIPE_A;
	bool locked = true;

+	if (WARN_ON(HAS_DDI(dev)))
+		return;
+
-	if (HAS_PCH_SPLIT(dev_priv->dev)) {
+	if (HAS_PCH_SPLIT(dev)) {
+		u32 port_sel;
+
		pp_reg = PCH_PP_CONTROL;
+		port_sel = I915_READ(PCH_PP_ON_DELAYS) & PANEL_PORT_SELECT_MASK;
+
+		if (port_sel == PANEL_PORT_SELECT_LVDS &&
+		    I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT)
+			panel_pipe = PIPE_B;
+		/* XXX: else fix for eDP */
+	} else if (IS_VALLEYVIEW(dev)) {
+		/* presumably write lock depends on pipe, not port select */
+		pp_reg = VLV_PIPE_PP_CONTROL(pipe);
+		panel_pipe = pipe;
-		lvds_reg = PCH_LVDS;
	} else {
		pp_reg = PP_CONTROL;
+		if (I915_READ(LVDS) & LVDS_PIPEB_SELECT)
+			panel_pipe = PIPE_B;
-		lvds_reg = LVDS;
	}

	val = I915_READ(pp_reg);
	if (!(val & PANEL_POWER_ON) ||
-	    ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS))
+	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

-	if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT)
-		panel_pipe = PIPE_B;
-
	WARN(panel_pipe == pipe && locked,
	     "panel assertion failure, pipe %c regs locked\n",
	     pipe_name(pipe));
}

1240
static void assert_cursor(struct drm_i915_private *dev_priv,
1198
static void assert_cursor(struct drm_i915_private *dev_priv,
1241
			  enum pipe pipe, bool state)
1199
			  enum pipe pipe, bool state)
1242
{
1200
{
1243
	struct drm_device *dev = dev_priv->dev;
1201
	struct drm_device *dev = dev_priv->dev;
1244
	bool cur_state;
1202
	bool cur_state;
1245
 
1203
 
1246
	if (IS_845G(dev) || IS_I865G(dev))
1204
	if (IS_845G(dev) || IS_I865G(dev))
1247
		cur_state = I915_READ(_CURACNTR) & CURSOR_ENABLE;
1205
		cur_state = I915_READ(_CURACNTR) & CURSOR_ENABLE;
1248
	else
1206
	else
1249
		cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
1207
		cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
1250
 
1208
 
1251
	WARN(cur_state != state,
1209
	WARN(cur_state != state,
1252
	     "cursor on pipe %c assertion failure (expected %s, current %s)\n",
1210
	     "cursor on pipe %c assertion failure (expected %s, current %s)\n",
1253
	     pipe_name(pipe), state_string(state), state_string(cur_state));
1211
	     pipe_name(pipe), state_string(state), state_string(cur_state));
1254
}
1212
}
1255
#define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
1213
#define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
1256
#define assert_cursor_disabled(d, p) assert_cursor(d, p, false)
1214
#define assert_cursor_disabled(d, p) assert_cursor(d, p, false)
1257
 
1215
 
void assert_pipe(struct drm_i915_private *dev_priv,
			enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

-	/* if we need the pipe A quirk it must be always on */
-	if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
+	/* if we need the pipe quirk it must be always on */
+	if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
+	    (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		state = true;

-	if (!intel_display_power_enabled(dev_priv,
+	if (!intel_display_power_is_enabled(dev_priv,
				POWER_DOMAIN_TRANSCODER(cpu_transcoder))) {
		cur_state = false;
	} else {
	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	cur_state = !!(val & PIPECONF_ENABLE);
	}

	WARN(cur_state != state,
	     "pipe %c assertion failure (expected %s, current %s)\n",
	     pipe_name(pipe), state_string(state), state_string(cur_state));
}

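assert_pipe() and the surrounding assert_* helpers all share one shape: read a register, reduce it to a boolean, and WARN (rather than BUG) on a mismatch so a bad modeset leaves a backtrace instead of a dead machine. A stripped-down sketch of that shape, with an invented helper name and the driver's own accessors assumed:

static void assert_bit_state(struct drm_i915_private *dev_priv,
			     u32 reg, u32 mask, bool state, const char *what)
{
	bool cur_state = !!(I915_READ(reg) & mask);

	WARN(cur_state != state,
	     "%s assertion failure (expected %s, current %s)\n",
	     what, state ? "on" : "off", cur_state ? "on" : "off");
}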
1285
static void assert_plane(struct drm_i915_private *dev_priv,
1244
static void assert_plane(struct drm_i915_private *dev_priv,
1286
			 enum plane plane, bool state)
1245
			 enum plane plane, bool state)
1287
{
1246
{
1288
	int reg;
1247
	int reg;
1289
	u32 val;
1248
	u32 val;
1290
	bool cur_state;
1249
	bool cur_state;
1291
 
1250
 
1292
	reg = DSPCNTR(plane);
1251
	reg = DSPCNTR(plane);
1293
	val = I915_READ(reg);
1252
	val = I915_READ(reg);
1294
	cur_state = !!(val & DISPLAY_PLANE_ENABLE);
1253
	cur_state = !!(val & DISPLAY_PLANE_ENABLE);
1295
	WARN(cur_state != state,
1254
	WARN(cur_state != state,
1296
	     "plane %c assertion failure (expected %s, current %s)\n",
1255
	     "plane %c assertion failure (expected %s, current %s)\n",
1297
	     plane_name(plane), state_string(state), state_string(cur_state));
1256
	     plane_name(plane), state_string(state), state_string(cur_state));
1298
}
1257
}
1299
 
1258
 
1300
#define assert_plane_enabled(d, p) assert_plane(d, p, true)
1259
#define assert_plane_enabled(d, p) assert_plane(d, p, true)
1301
#define assert_plane_disabled(d, p) assert_plane(d, p, false)
1260
#define assert_plane_disabled(d, p) assert_plane(d, p, false)
1302
 
1261
 
1303
static void assert_planes_disabled(struct drm_i915_private *dev_priv,
1262
static void assert_planes_disabled(struct drm_i915_private *dev_priv,
1304
				   enum pipe pipe)
1263
				   enum pipe pipe)
1305
{
1264
{
1306
	struct drm_device *dev = dev_priv->dev;
1265
	struct drm_device *dev = dev_priv->dev;
1307
	int reg, i;
1266
	int reg, i;
1308
	u32 val;
1267
	u32 val;
1309
	int cur_pipe;
1268
	int cur_pipe;
1310
 
1269
 
1311
	/* Primary planes are fixed to pipes on gen4+ */
1270
	/* Primary planes are fixed to pipes on gen4+ */
1312
	if (INTEL_INFO(dev)->gen >= 4) {
1271
	if (INTEL_INFO(dev)->gen >= 4) {
1313
		reg = DSPCNTR(pipe);
1272
		reg = DSPCNTR(pipe);
1314
		val = I915_READ(reg);
1273
		val = I915_READ(reg);
1315
		WARN(val & DISPLAY_PLANE_ENABLE,
1274
		WARN(val & DISPLAY_PLANE_ENABLE,
1316
		     "plane %c assertion failure, should be disabled but not\n",
1275
		     "plane %c assertion failure, should be disabled but not\n",
1317
		     plane_name(pipe));
1276
		     plane_name(pipe));
1318
		return;
1277
		return;
1319
	}
1278
	}
1320
 
1279
 
1321
	/* Need to check both planes against the pipe */
1280
	/* Need to check both planes against the pipe */
1322
	for_each_pipe(i) {
1281
	for_each_pipe(dev_priv, i) {
1323
		reg = DSPCNTR(i);
1282
		reg = DSPCNTR(i);
1324
		val = I915_READ(reg);
1283
		val = I915_READ(reg);
1325
		cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
1284
		cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
1326
			DISPPLANE_SEL_PIPE_SHIFT;
1285
			DISPPLANE_SEL_PIPE_SHIFT;
1327
		WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
1286
		WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
1328
		     "plane %c assertion failure, should be off on pipe %c but is still active\n",
1287
		     "plane %c assertion failure, should be off on pipe %c but is still active\n",
1329
		     plane_name(i), pipe_name(pipe));
1288
		     plane_name(i), pipe_name(pipe));
1330
	}
1289
	}
1331
}
1290
}
1332
 
1291
 
static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	int reg, sprite;
	u32 val;

+	if (INTEL_INFO(dev)->gen >= 9) {
+		for_each_sprite(pipe, sprite) {
+			val = I915_READ(PLANE_CTL(pipe, sprite));
+			WARN(val & PLANE_CTL_ENABLE,
+			     "plane %d assertion failure, should be off on pipe %c but is still active\n",
+			     sprite, pipe_name(pipe));
+		}
-	if (IS_VALLEYVIEW(dev)) {
+	} else if (IS_VALLEYVIEW(dev)) {
		for_each_sprite(pipe, sprite) {
			reg = SPCNTR(pipe, sprite);
		val = I915_READ(reg);
			WARN(val & SP_ENABLE,
			     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
			     sprite_name(pipe, sprite), pipe_name(pipe));
		}
	} else if (INTEL_INFO(dev)->gen >= 7) {
		reg = SPRCTL(pipe);
		val = I915_READ(reg);
		WARN(val & SPRITE_ENABLE,
		     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
		     plane_name(pipe), pipe_name(pipe));
	} else if (INTEL_INFO(dev)->gen >= 5) {
		reg = DVSCNTR(pipe);
		val = I915_READ(reg);
		WARN(val & DVS_ENABLE,
		     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
		     plane_name(pipe), pipe_name(pipe));
	}
}

+static void assert_vblank_disabled(struct drm_crtc *crtc)
+{
+	if (WARN_ON(drm_crtc_vblank_get(crtc) == 0))
+		drm_crtc_vblank_put(crtc);
+}

1363
static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
1335
static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
1364
{
1336
{
1365
	u32 val;
1337
	u32 val;
1366
	bool enabled;
1338
	bool enabled;
1367
 
1339
 
1368
	WARN_ON(!(HAS_PCH_IBX(dev_priv->dev) || HAS_PCH_CPT(dev_priv->dev)));
1340
	WARN_ON(!(HAS_PCH_IBX(dev_priv->dev) || HAS_PCH_CPT(dev_priv->dev)));
1369
 
1341
 
1370
	val = I915_READ(PCH_DREF_CONTROL);
1342
	val = I915_READ(PCH_DREF_CONTROL);
1371
	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
1343
	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
1372
			    DREF_SUPERSPREAD_SOURCE_MASK));
1344
			    DREF_SUPERSPREAD_SOURCE_MASK));
1373
	WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
1345
	WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
1374
}
1346
}
1375
 
1347
 
1376
static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
1348
static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
1377
				       enum pipe pipe)
1349
				       enum pipe pipe)
1378
{
1350
{
1379
	int reg;
1351
	int reg;
1380
	u32 val;
1352
	u32 val;
1381
	bool enabled;
1353
	bool enabled;
1382
 
1354
 
1383
	reg = PCH_TRANSCONF(pipe);
1355
	reg = PCH_TRANSCONF(pipe);
1384
	val = I915_READ(reg);
1356
	val = I915_READ(reg);
1385
	enabled = !!(val & TRANS_ENABLE);
1357
	enabled = !!(val & TRANS_ENABLE);
1386
	WARN(enabled,
1358
	WARN(enabled,
1387
	     "transcoder assertion failed, should be off on pipe %c but is still active\n",
1359
	     "transcoder assertion failed, should be off on pipe %c but is still active\n",
1388
	     pipe_name(pipe));
1360
	     pipe_name(pipe));
1389
}
1361
}
1390
 
1362
 
1391
static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
1363
static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
1392
			    enum pipe pipe, u32 port_sel, u32 val)
1364
			    enum pipe pipe, u32 port_sel, u32 val)
1393
{
1365
{
1394
	if ((val & DP_PORT_EN) == 0)
1366
	if ((val & DP_PORT_EN) == 0)
1395
		return false;
1367
		return false;
1396
 
1368
 
1397
	if (HAS_PCH_CPT(dev_priv->dev)) {
1369
	if (HAS_PCH_CPT(dev_priv->dev)) {
1398
		u32	trans_dp_ctl_reg = TRANS_DP_CTL(pipe);
1370
		u32	trans_dp_ctl_reg = TRANS_DP_CTL(pipe);
1399
		u32	trans_dp_ctl = I915_READ(trans_dp_ctl_reg);
1371
		u32	trans_dp_ctl = I915_READ(trans_dp_ctl_reg);
1400
		if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
1372
		if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
1401
			return false;
1373
			return false;
1402
	} else if (IS_CHERRYVIEW(dev_priv->dev)) {
1374
	} else if (IS_CHERRYVIEW(dev_priv->dev)) {
1403
		if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe))
1375
		if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe))
1404
			return false;
1376
			return false;
1405
	} else {
1377
	} else {
1406
		if ((val & DP_PIPE_MASK) != (pipe << 30))
1378
		if ((val & DP_PIPE_MASK) != (pipe << 30))
1407
			return false;
1379
			return false;
1408
	}
1380
	}
1409
	return true;
1381
	return true;
1410
}
1382
}
1411
 
1383
 
1412
static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
1384
static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
1413
			      enum pipe pipe, u32 val)
1385
			      enum pipe pipe, u32 val)
1414
{
1386
{
1415
	if ((val & SDVO_ENABLE) == 0)
1387
	if ((val & SDVO_ENABLE) == 0)
1416
		return false;
1388
		return false;
1417
 
1389
 
1418
	if (HAS_PCH_CPT(dev_priv->dev)) {
1390
	if (HAS_PCH_CPT(dev_priv->dev)) {
1419
		if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
1391
		if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
1420
			return false;
1392
			return false;
1421
	} else if (IS_CHERRYVIEW(dev_priv->dev)) {
1393
	} else if (IS_CHERRYVIEW(dev_priv->dev)) {
1422
		if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe))
1394
		if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe))
1423
			return false;
1395
			return false;
1424
	} else {
1396
	} else {
1425
		if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe))
1397
		if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe))
1426
			return false;
1398
			return false;
1427
	}
1399
	}
1428
	return true;
1400
	return true;
1429
}
1401
}
1430
 
1402
 
1431
static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
1403
static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
1432
			      enum pipe pipe, u32 val)
1404
			      enum pipe pipe, u32 val)
1433
{
1405
{
1434
	if ((val & LVDS_PORT_EN) == 0)
1406
	if ((val & LVDS_PORT_EN) == 0)
1435
		return false;
1407
		return false;
1436
 
1408
 
1437
	if (HAS_PCH_CPT(dev_priv->dev)) {
1409
	if (HAS_PCH_CPT(dev_priv->dev)) {
1438
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1410
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1439
			return false;
1411
			return false;
1440
	} else {
1412
	} else {
1441
		if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
1413
		if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
1442
			return false;
1414
			return false;
1443
	}
1415
	}
1444
	return true;
1416
	return true;
1445
}
1417
}
1446
 
1418
 
1447
static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
1419
static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
1448
			      enum pipe pipe, u32 val)
1420
			      enum pipe pipe, u32 val)
1449
{
1421
{
1450
	if ((val & ADPA_DAC_ENABLE) == 0)
1422
	if ((val & ADPA_DAC_ENABLE) == 0)
1451
		return false;
1423
		return false;
1452
	if (HAS_PCH_CPT(dev_priv->dev)) {
1424
	if (HAS_PCH_CPT(dev_priv->dev)) {
1453
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1425
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1454
			return false;
1426
			return false;
1455
	} else {
1427
	} else {
1456
		if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
1428
		if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
1457
			return false;
1429
			return false;
1458
	}
1430
	}
1459
	return true;
1431
	return true;
1460
}
1432
}
1461
 
1433
 
1462
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
1434
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
1463
				   enum pipe pipe, int reg, u32 port_sel)
1435
				   enum pipe pipe, int reg, u32 port_sel)
1464
{
1436
{
1465
	u32 val = I915_READ(reg);
1437
	u32 val = I915_READ(reg);
1466
	WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
1438
	WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
1467
	     "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
1439
	     "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
1468
	     reg, pipe_name(pipe));
1440
	     reg, pipe_name(pipe));
1469
 
1441
 
1470
	WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0
1442
	WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0
1471
	     && (val & DP_PIPEB_SELECT),
1443
	     && (val & DP_PIPEB_SELECT),
1472
	     "IBX PCH dp port still using transcoder B\n");
1444
	     "IBX PCH dp port still using transcoder B\n");
1473
}
1445
}
1474
 
1446
 
1475
static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
1447
static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
1476
				     enum pipe pipe, int reg)
1448
				     enum pipe pipe, int reg)
1477
{
1449
{
1478
	u32 val = I915_READ(reg);
1450
	u32 val = I915_READ(reg);
1479
	WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
1451
	WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
1480
	     "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
1452
	     "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
1481
	     reg, pipe_name(pipe));
1453
	     reg, pipe_name(pipe));
1482
 
1454
 
1483
	WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0
1455
	WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0
1484
	     && (val & SDVO_PIPE_B_SELECT),
1456
	     && (val & SDVO_PIPE_B_SELECT),
1485
	     "IBX PCH hdmi port still using transcoder B\n");
1457
	     "IBX PCH hdmi port still using transcoder B\n");
1486
}
1458
}
1487
 
1459
 
1488
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1460
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1489
				      enum pipe pipe)
1461
				      enum pipe pipe)
1490
{
1462
{
1491
	int reg;
1463
	int reg;
1492
	u32 val;
1464
	u32 val;
1493
 
1465
 
1494
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1466
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1495
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1467
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1496
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
1468
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
1497
 
1469
 
1498
	reg = PCH_ADPA;
1470
	reg = PCH_ADPA;
1499
	val = I915_READ(reg);
1471
	val = I915_READ(reg);
1500
	WARN(adpa_pipe_enabled(dev_priv, pipe, val),
1472
	WARN(adpa_pipe_enabled(dev_priv, pipe, val),
1501
	     "PCH VGA enabled on transcoder %c, should be disabled\n",
1473
	     "PCH VGA enabled on transcoder %c, should be disabled\n",
1502
	     pipe_name(pipe));
1474
	     pipe_name(pipe));
1503
 
1475
 
1504
	reg = PCH_LVDS;
1476
	reg = PCH_LVDS;
1505
	val = I915_READ(reg);
1477
	val = I915_READ(reg);
1506
	WARN(lvds_pipe_enabled(dev_priv, pipe, val),
1478
	WARN(lvds_pipe_enabled(dev_priv, pipe, val),
1507
	     "PCH LVDS enabled on transcoder %c, should be disabled\n",
1479
	     "PCH LVDS enabled on transcoder %c, should be disabled\n",
1508
	     pipe_name(pipe));
1480
	     pipe_name(pipe));
1509
 
1481
 
1510
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB);
1482
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB);
1511
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC);
1483
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC);
1512
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
1484
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
1513
}
1485
}
1514
 
1486
 
1515
static void intel_init_dpio(struct drm_device *dev)
1487
static void intel_init_dpio(struct drm_device *dev)
1516
{
1488
{
1517
	struct drm_i915_private *dev_priv = dev->dev_private;
1489
	struct drm_i915_private *dev_priv = dev->dev_private;
1518
 
1490
 
1519
	if (!IS_VALLEYVIEW(dev))
1491
	if (!IS_VALLEYVIEW(dev))
1520
		return;
1492
		return;
1521
 
1493
 
1522
	/*
1494
	/*
1523
	 * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
1495
	 * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
1524
	 * CHV x1 PHY (DP/HDMI D)
1496
	 * CHV x1 PHY (DP/HDMI D)
1525
	 * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
1497
	 * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
1526
	 */
1498
	 */
1527
	if (IS_CHERRYVIEW(dev)) {
1499
	if (IS_CHERRYVIEW(dev)) {
1528
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
1500
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
1529
		DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
1501
		DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
1530
	} else {
1502
	} else {
1531
	DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
1503
	DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
1532
	}
1504
	}
1533
}
1505
}
1534
 
1506
 
1535
-static void intel_reset_dpio(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	if (IS_CHERRYVIEW(dev)) {
-		enum dpio_phy phy;
-		u32 val;
-
-		for (phy = DPIO_PHY0; phy < I915_NUM_PHYS_VLV; phy++) {
-			/* Poll for phypwrgood signal */
-			if (wait_for(I915_READ(DISPLAY_PHY_STATUS) &
-						PHY_POWERGOOD(phy), 1))
-				DRM_ERROR("Display PHY %d is not power up\n", phy);
-
-	/*
-			 * Deassert common lane reset for PHY.
-			 *
-			 * This should only be done on init and resume from S3
-			 * with both PLLs disabled, or we risk losing DPIO and
-			 * PLL synchronization.
-			 */
-			val = I915_READ(DISPLAY_PHY_CONTROL);
-			I915_WRITE(DISPLAY_PHY_CONTROL,
-				PHY_COM_LANE_RESET_DEASSERT(phy, val));
-		}
-	}
-}
-
-static void vlv_enable_pll(struct intel_crtc *crtc)
+static void vlv_enable_pll(struct intel_crtc *crtc,
+			   const struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = DPLL(crtc->pipe);
-	u32 dpll = crtc->config.dpll_hw_state.dpll;
+	u32 dpll = pipe_config->dpll_hw_state.dpll;

	assert_pipe_disabled(dev_priv, crtc->pipe);

    /* No really, not for ILK+ */
	BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));

    /* PLL is protected by panel, make sure we can write it */
-    if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
+	if (IS_MOBILE(dev_priv->dev))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150);

	if (wait_for(((I915_READ(reg) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
		DRM_ERROR("DPLL %d failed to lock\n", crtc->pipe);

-	I915_WRITE(DPLL_MD(crtc->pipe), crtc->config.dpll_hw_state.dpll_md);
+	I915_WRITE(DPLL_MD(crtc->pipe), pipe_config->dpll_hw_state.dpll_md);
	POSTING_READ(DPLL_MD(crtc->pipe));

	/* We do this three times for luck */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
}

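Every write in the enable sequence above is paired with POSTING_READ() and a delay: the read-back flushes the write out of the write buffer so the udelay() actually measures PLL warm-up time, and the final write is then repeated "three times for luck", as the comment admits. The idiom condensed into a sketch (helper name invented, accessors assumed from the surrounding code):

static void dpll_write_and_settle(struct drm_i915_private *dev_priv,
				  int reg, u32 val)
{
	I915_WRITE(reg, val);
	POSTING_READ(reg);	/* flush the write before timing the delay */
	udelay(150);		/* let the PLL clocks stabilize */
}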
-
 
1546
static void chv_enable_pll(struct intel_crtc *crtc,
1601
static void chv_enable_pll(struct intel_crtc *crtc)
1547
			   const struct intel_crtc_config *pipe_config)
1602
{
1548
{
1603
	struct drm_device *dev = crtc->base.dev;
1549
	struct drm_device *dev = crtc->base.dev;
1604
	struct drm_i915_private *dev_priv = dev->dev_private;
1550
	struct drm_i915_private *dev_priv = dev->dev_private;
1605
	int pipe = crtc->pipe;
1551
	int pipe = crtc->pipe;
1606
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
1552
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
1607
	u32 tmp;
1553
	u32 tmp;
1608
 
1554
 
1609
	assert_pipe_disabled(dev_priv, crtc->pipe);
1555
	assert_pipe_disabled(dev_priv, crtc->pipe);
1610
 
1556
 
1611
	BUG_ON(!IS_CHERRYVIEW(dev_priv->dev));
1557
	BUG_ON(!IS_CHERRYVIEW(dev_priv->dev));
1612
 
1558
 
1613
	mutex_lock(&dev_priv->dpio_lock);
1559
	mutex_lock(&dev_priv->dpio_lock);
1614
 
1560
 
1615
	/* Enable back the 10bit clock to display controller */
1561
	/* Enable back the 10bit clock to display controller */
1616
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
1562
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
1617
	tmp |= DPIO_DCLKP_EN;
1563
	tmp |= DPIO_DCLKP_EN;
1618
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);
1564
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);
1619
 
1565
 
1620
	/*
1566
	/*
1621
	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
1567
	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
1622
	 */
1568
	 */
1623
	udelay(1);
1569
	udelay(1);
1624
 
1570
 
1625
	/* Enable PLL */
1571
	/* Enable PLL */
1626
	I915_WRITE(DPLL(pipe), crtc->config.dpll_hw_state.dpll);
1572
	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
1627
 
1573
 
1628
	/* Check PLL is locked */
1574
	/* Check PLL is locked */
1629
	if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
1575
	if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
1630
		DRM_ERROR("PLL %d failed to lock\n", pipe);
1576
		DRM_ERROR("PLL %d failed to lock\n", pipe);
1631
 
1577
 
1632
	/* not sure when this should be written */
1578
	/* not sure when this should be written */
1633
	I915_WRITE(DPLL_MD(pipe), crtc->config.dpll_hw_state.dpll_md);
1579
	I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
1634
	POSTING_READ(DPLL_MD(pipe));
1580
	POSTING_READ(DPLL_MD(pipe));
1635
 
1581
 
1636
	mutex_unlock(&dev_priv->dpio_lock);
1582
	mutex_unlock(&dev_priv->dpio_lock);
1637
}
1583
}
-
 
1584
 
-
 
1585
static int intel_num_dvo_pipes(struct drm_device *dev)
-
 
1586
{
-
 
1587
	struct intel_crtc *crtc;
-
 
1588
	int count = 0;
-
 
1589
 
-
 
1590
	for_each_intel_crtc(dev, crtc)
-
 
1591
		count += crtc->active &&
-
 
1592
			intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO);
-
 
1593
 
-
 
1594
	return count;
-
 
1595
}
1638
 
1596
 
1639
static void i9xx_enable_pll(struct intel_crtc *crtc)
1597
static void i9xx_enable_pll(struct intel_crtc *crtc)
1640
{
1598
{
1641
	struct drm_device *dev = crtc->base.dev;
1599
	struct drm_device *dev = crtc->base.dev;
1642
	struct drm_i915_private *dev_priv = dev->dev_private;
1600
	struct drm_i915_private *dev_priv = dev->dev_private;
1643
	int reg = DPLL(crtc->pipe);
1601
	int reg = DPLL(crtc->pipe);
1644
	u32 dpll = crtc->config.dpll_hw_state.dpll;
1602
	u32 dpll = crtc->config.dpll_hw_state.dpll;
1645
 
1603
 
1646
	assert_pipe_disabled(dev_priv, crtc->pipe);
1604
	assert_pipe_disabled(dev_priv, crtc->pipe);
1647
 
1605
 
1648
	/* No really, not for ILK+ */
1606
	/* No really, not for ILK+ */
1649
	BUG_ON(INTEL_INFO(dev)->gen >= 5);
1607
	BUG_ON(INTEL_INFO(dev)->gen >= 5);
1650
 
1608
 
1651
	/* PLL is protected by panel, make sure we can write it */
1609
	/* PLL is protected by panel, make sure we can write it */
1652
	if (IS_MOBILE(dev) && !IS_I830(dev))
1610
	if (IS_MOBILE(dev) && !IS_I830(dev))
1653
		assert_panel_unlocked(dev_priv, crtc->pipe);
1611
		assert_panel_unlocked(dev_priv, crtc->pipe);
-
 
1612
 
-
 
1613
	/* Enable DVO 2x clock on both PLLs if necessary */
-
 
1614
	if (IS_I830(dev) && intel_num_dvo_pipes(dev) > 0) {
-
 
1615
		/*
-
 
1616
		 * It appears to be important that we don't enable this
-
 
1617
		 * for the current pipe before otherwise configuring the
-
 
1618
		 * PLL. No idea how this should be handled if multiple
-
 
1619
		 * DVO outputs are enabled simultaneosly.
-
 
1620
		 */
1654
 
1621
		dpll |= DPLL_DVO_2X_MODE;
-
 
1622
		I915_WRITE(DPLL(!crtc->pipe),
-
 
1623
			   I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
1655
	I915_WRITE(reg, dpll);
1624
	}
1656
 
1625
 
1657
	/* Wait for the clocks to stabilize. */
1626
	/* Wait for the clocks to stabilize. */
1658
	POSTING_READ(reg);
1627
	POSTING_READ(reg);
1659
	udelay(150);
1628
	udelay(150);
1660
 
1629
 
1661
	if (INTEL_INFO(dev)->gen >= 4) {
1630
	if (INTEL_INFO(dev)->gen >= 4) {
1662
		I915_WRITE(DPLL_MD(crtc->pipe),
1631
		I915_WRITE(DPLL_MD(crtc->pipe),
1663
			   crtc->config.dpll_hw_state.dpll_md);
1632
			   crtc->config.dpll_hw_state.dpll_md);
1664
	} else {
1633
	} else {
1665
		/* The pixel multiplier can only be updated once the
1634
		/* The pixel multiplier can only be updated once the
1666
		 * DPLL is enabled and the clocks are stable.
1635
		 * DPLL is enabled and the clocks are stable.
1667
		 *
1636
		 *
1668
		 * So write it again.
1637
		 * So write it again.
1669
		 */
1638
		 */
1670
		I915_WRITE(reg, dpll);
1639
		I915_WRITE(reg, dpll);
1671
	}
1640
	}
1672
 
1641
 
1673
    /* We do this three times for luck */
1642
    /* We do this three times for luck */
1674
	I915_WRITE(reg, dpll);
1643
	I915_WRITE(reg, dpll);
1675
    POSTING_READ(reg);
1644
    POSTING_READ(reg);
1676
    udelay(150); /* wait for warmup */
1645
    udelay(150); /* wait for warmup */
1677
	I915_WRITE(reg, dpll);
1646
	I915_WRITE(reg, dpll);
1678
    POSTING_READ(reg);
1647
    POSTING_READ(reg);
1679
    udelay(150); /* wait for warmup */
1648
    udelay(150); /* wait for warmup */
1680
	I915_WRITE(reg, dpll);
1649
	I915_WRITE(reg, dpll);
1681
    POSTING_READ(reg);
1650
    POSTING_READ(reg);
1682
    udelay(150); /* wait for warmup */
1651
    udelay(150); /* wait for warmup */
1683
}
1652
}
1684
 
1653
 
1685
/**
1654
/**
1686
 * i9xx_disable_pll - disable a PLL
1655
 * i9xx_disable_pll - disable a PLL
1687
 * @dev_priv: i915 private structure
1656
 * @dev_priv: i915 private structure
1688
 * @pipe: pipe PLL to disable
1657
 * @pipe: pipe PLL to disable
1689
 *
1658
 *
1690
 * Disable the PLL for @pipe, making sure the pipe is off first.
1659
 * Disable the PLL for @pipe, making sure the pipe is off first.
1691
 *
1660
 *
1692
 * Note!  This is for pre-ILK only.
1661
 * Note!  This is for pre-ILK only.
1693
 */
1662
 */
1694
static void i9xx_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1663
static void i9xx_disable_pll(struct intel_crtc *crtc)
1695
{
1664
{
-
 
1665
	struct drm_device *dev = crtc->base.dev;
-
 
1666
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
1667
	enum pipe pipe = crtc->pipe;
-
 
1668
 
-
 
1669
	/* Disable DVO 2x clock on both PLLs if necessary */
-
 
1670
	if (IS_I830(dev) &&
-
 
1671
	    intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO) &&
-
 
1672
	    intel_num_dvo_pipes(dev) == 1) {
-
 
1673
		I915_WRITE(DPLL(PIPE_B),
-
 
1674
			   I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
-
 
1675
		I915_WRITE(DPLL(PIPE_A),
-
 
1676
			   I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
-
 
1677
	}
-
 
1678
 
1696
	/* Don't disable pipe A or pipe A PLLs if needed */
1679
	/* Don't disable pipe or pipe PLLs if needed */
1697
	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
1680
	if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
-
 
1681
	    (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
1698
		return;
1682
		return;
1699
 
1683
 
1700
	/* Make sure the pipe isn't still relying on us */
1684
	/* Make sure the pipe isn't still relying on us */
1701
	assert_pipe_disabled(dev_priv, pipe);
1685
	assert_pipe_disabled(dev_priv, pipe);
1702
 
1686
 
1703
	I915_WRITE(DPLL(pipe), 0);
1687
	I915_WRITE(DPLL(pipe), 0);
1704
	POSTING_READ(DPLL(pipe));
1688
	POSTING_READ(DPLL(pipe));
1705
}
1689
}
1706
 
1690
 
1707
static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1691
static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1708
{
1692
{
1709
	u32 val = 0;
1693
	u32 val = 0;
1710
 
1694
 
1711
	/* Make sure the pipe isn't still relying on us */
1695
	/* Make sure the pipe isn't still relying on us */
1712
	assert_pipe_disabled(dev_priv, pipe);
1696
	assert_pipe_disabled(dev_priv, pipe);
1713
 
1697
 
1714
	/*
1698
	/*
1715
	 * Leave integrated clock source and reference clock enabled for pipe B.
1699
	 * Leave integrated clock source and reference clock enabled for pipe B.
1716
	 * The latter is needed for VGA hotplug / manual detection.
1700
	 * The latter is needed for VGA hotplug / manual detection.
1717
	 */
1701
	 */
1718
	if (pipe == PIPE_B)
1702
	if (pipe == PIPE_B)
1719
		val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REFA_CLK_ENABLE_VLV;
1703
		val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REFA_CLK_ENABLE_VLV;
1720
	I915_WRITE(DPLL(pipe), val);
1704
	I915_WRITE(DPLL(pipe), val);
1721
	POSTING_READ(DPLL(pipe));
1705
	POSTING_READ(DPLL(pipe));
1722
 
1706
 
1723
}
1707
}

static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	/* Set PLL en = 0 */
	val = DPLL_SSC_REF_CLOCK_CHV | DPLL_REFA_CLK_ENABLE_VLV;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;
	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));

	mutex_lock(&dev_priv->dpio_lock);

	/* Disable 10bit clock to display controller */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	val &= ~DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);

	/* disable left/right clock distribution */
	if (pipe != PIPE_B) {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
	} else {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
	}

	mutex_unlock(&dev_priv->dpio_lock);
}

void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
		struct intel_digital_port *dport)
{
	u32 port_mask;
	int dpll_reg;

	switch (dport->port) {
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	default:
		BUG();
	}

	if (wait_for((I915_READ(dpll_reg) & port_mask) == 0, 1000))
		WARN(1, "timed out waiting for port %c ready: 0x%08x\n",
		     port_name(dport->port), I915_READ(dpll_reg));
}
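
/*
 * Note added for clarity (not in the original sources): vlv_wait_port_ready()
 * polls the port ready status with the wait_for() helper; ports B and C
 * report readiness through DPLL(0), while port D reports it through
 * DPIO_PHY_STATUS. wait_for() keeps re-evaluating the condition until it
 * holds or the millisecond timeout expires, and returns non-zero on timeout.
 */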

static void intel_prepare_shared_dpll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);

	if (WARN_ON(pll == NULL))
		return;

	WARN_ON(!pll->config.crtc_mask);
	if (pll->active == 0) {
		DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
		WARN_ON(pll->on);
		assert_shared_dpll_disabled(dev_priv, pll);

		pll->mode_set(dev_priv, pll);
	}
}

/**
 * intel_enable_shared_dpll - enable PCH PLL
 * @dev_priv: i915 private structure
 * @pipe: pipe PLL to enable
 *
 * The PCH PLL needs to be enabled before the PCH transcoder, since it
 * drives the transcoder clock.
 */
static void intel_enable_shared_dpll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);

	if (WARN_ON(pll == NULL))
		return;

	if (WARN_ON(pll->config.crtc_mask == 0))
		return;

	DRM_DEBUG_KMS("enable %s (active %d, on? %d) for crtc %d\n",
		      pll->name, pll->active, pll->on,
		      crtc->base.base.id);

	if (pll->active++) {
		WARN_ON(!pll->on);
		assert_shared_dpll_enabled(dev_priv, pll);
		return;
	}
	WARN_ON(pll->on);

	intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);

	DRM_DEBUG_KMS("enabling %s\n", pll->name);
	pll->enable(dev_priv, pll);
	pll->on = true;
}
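
/*
 * Illustrative sketch (not in the original sources): the shared DPLL
 * helpers are refcounted via pll->active, so a CRTC enable path pairs
 * them roughly like this (names taken from this file, sequence simplified):
 *
 *	intel_prepare_shared_dpll(intel_crtc);	/- program it while still off
 *	intel_enable_shared_dpll(intel_crtc);	/- first user turns it on
 *	...
 *	intel_disable_shared_dpll(intel_crtc);	/- last user turns it off
 */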

static void intel_disable_shared_dpll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);

	/* PCH only available on ILK+ */
	BUG_ON(INTEL_INFO(dev)->gen < 5);
	if (WARN_ON(pll == NULL))
		return;

	if (WARN_ON(pll->config.crtc_mask == 0))
		return;

	DRM_DEBUG_KMS("disable %s (active %d, on? %d) for crtc %d\n",
		      pll->name, pll->active, pll->on,
		      crtc->base.base.id);

	if (WARN_ON(pll->active == 0)) {
		assert_shared_dpll_disabled(dev_priv, pll);
		return;
	}

	assert_shared_dpll_enabled(dev_priv, pll);
	WARN_ON(!pll->on);
	if (--pll->active)
		return;

	DRM_DEBUG_KMS("disabling %s\n", pll->name);
	pll->disable(dev_priv, pll);
	pll->on = false;

	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
}

static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t reg, val, pipeconf_val;

	/* PCH only available on ILK+ */
	BUG_ON(!HAS_PCH_SPLIT(dev));

	/* Make sure PCH DPLL is enabled */
	assert_shared_dpll_enabled(dev_priv,
				   intel_crtc_to_shared_dpll(intel_crtc));

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);

	if (HAS_PCH_CPT(dev)) {
		/* Workaround: Set the timing override bit before enabling the
		 * pch transcoder. */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	pipeconf_val = I915_READ(PIPECONF(pipe));

	if (HAS_PCH_IBX(dev_priv->dev)) {
		/*
		 * make the BPC in transcoder be consistent with
		 * that in pipeconf reg.
		 */
		val &= ~PIPECONF_BPC_MASK;
		val |= pipeconf_val & PIPECONF_BPC_MASK;
	}

	val &= ~TRANS_INTERLACE_MASK;
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
		if (HAS_PCH_IBX(dev_priv->dev) &&
		    intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
			val |= TRANS_LEGACY_INTERLACED_ILK;
		else
			val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	I915_WRITE(reg, val | TRANS_ENABLE);
	if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
		DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
}

static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
				      enum transcoder cpu_transcoder)
{
	u32 val, pipeconf_val;

	/* PCH only available on ILK+ */
	BUG_ON(!HAS_PCH_SPLIT(dev_priv->dev));

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
	assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);

	/* Workaround: set timing override bit. */
	val = I915_READ(_TRANSA_CHICKEN2);
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(_TRANSA_CHICKEN2, val);

	val = TRANS_ENABLE;
	pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));

	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
	    PIPECONF_INTERLACED_ILK)
		val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	I915_WRITE(LPT_TRANSCONF, val);
	if (wait_for(I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE, 100))
		DRM_ERROR("Failed to enable PCH transcoder\n");
}

static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	uint32_t reg, val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	val &= ~TRANS_ENABLE;
	I915_WRITE(reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
		DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));

	if (!HAS_PCH_IBX(dev)) {
		/* Workaround: Clear the timing override chicken bit again. */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}
}

static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(LPT_TRANSCONF);
	val &= ~TRANS_ENABLE;
	I915_WRITE(LPT_TRANSCONF, val);
	/* wait for PCH transcoder off, transcoder state */
	if (wait_for((I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE) == 0, 50))
		DRM_ERROR("Failed to disable PCH transcoder\n");

	/* Workaround: clear timing override bit. */
	val = I915_READ(_TRANSA_CHICKEN2);
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(_TRANSA_CHICKEN2, val);
}
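
/*
 * Note added for clarity (not in the original sources): the PCH transcoder
 * helpers above encode a strict ordering. Enable requires the shared DPLL
 * and both FDI TX/RX to be running first; disable requires FDI and the PCH
 * ports to be off already. On CPT/LPT a timing-override chicken bit is set
 * before enabling the transcoder and cleared again after disabling it.
 */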

/**
 * intel_enable_pipe - enable a pipe, asserting requirements
 * @crtc: crtc responsible for the pipe
 *
 * Enable @crtc's pipe, making sure that various hardware specific requirements
 * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
 */
static void intel_enable_pipe(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = crtc->pipe;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);
	enum pipe pch_transcoder;
	int reg;
	u32 val;

	assert_planes_disabled(dev_priv, pipe);
	assert_cursor_disabled(dev_priv, pipe);
	assert_sprites_disabled(dev_priv, pipe);

	if (HAS_PCH_LPT(dev_priv->dev))
		pch_transcoder = TRANSCODER_A;
	else
		pch_transcoder = pipe;

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (!HAS_PCH_SPLIT(dev_priv->dev))
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	else {
		if (crtc->config.has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	if (val & PIPECONF_ENABLE) {
		WARN_ON(!((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
			  (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)));
		return;
	}

	I915_WRITE(reg, val | PIPECONF_ENABLE);
	POSTING_READ(reg);
}
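
/*
 * Note added for clarity (not in the original sources): intel_enable_pipe()
 * only sets PIPECONF_ENABLE and issues a posting read; it does not wait for
 * a vblank itself. If the pipe is already running it warns, unless one of
 * the "force pipe on" quirks (QUIRK_PIPEA_FORCE/QUIRK_PIPEB_FORCE) is set.
 */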

/**
 * intel_disable_pipe - disable a pipe, asserting requirements
 * @crtc: crtc whose pipe is to be disabled
 *
 * Disable the pipe of @crtc, making sure that various hardware
 * specific requirements are met, if applicable, e.g. plane
 * disabled, panel fitter off, etc.
 *
 * Will wait until the pipe has shut down before returning.
 */
static void intel_disable_pipe(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	enum transcoder cpu_transcoder = crtc->config.cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	int reg;
	u32 val;

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(dev_priv, pipe);
	assert_cursor_disabled(dev_priv, pipe);
	assert_sprites_disabled(dev_priv, pipe);

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (crtc->config.double_wide)
		val &= ~PIPECONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!(pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) &&
	    !(pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		val &= ~PIPECONF_ENABLE;

	I915_WRITE(reg, val);
	if ((val & PIPECONF_ENABLE) == 0)
		intel_wait_for_pipe_off(crtc);
}

/*
 * Plane regs are double buffered, going from enabled->disabled needs a
 * trigger in order to latch.  The display address reg provides this.
 */
void intel_flush_primary_plane(struct drm_i915_private *dev_priv,
				      enum plane plane)
{
	struct drm_device *dev = dev_priv->dev;
	u32 reg = INTEL_INFO(dev)->gen >= 4 ? DSPSURF(plane) : DSPADDR(plane);

	I915_WRITE(reg, I915_READ(reg));
	POSTING_READ(reg);
}

/**
 * intel_enable_primary_hw_plane - enable the primary plane on a given pipe
 * @plane: plane to be enabled
 * @crtc: crtc for the plane
 *
 * Enable @plane on @crtc, making sure that the pipe is running first.
 */
static void intel_enable_primary_hw_plane(struct drm_plane *plane,
					  struct drm_crtc *crtc)
{
	struct drm_device *dev = plane->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	/* If the pipe isn't enabled, we can't pump pixels and may hang */
	assert_pipe_enabled(dev_priv, intel_crtc->pipe);

	if (intel_crtc->primary_enabled)
		return;

	intel_crtc->primary_enabled = true;

	dev_priv->display.update_primary_plane(crtc, plane->fb,
					       crtc->x, crtc->y);

	/*
	 * BDW signals flip done immediately if the plane
	 * is disabled, even if the plane enable is already
	 * armed to occur at the next vblank :(
	 */
	if (IS_BROADWELL(dev))
		intel_wait_for_vblank(dev, intel_crtc->pipe);
}

/**
 * intel_disable_primary_hw_plane - disable the primary hardware plane
 * @plane: plane to be disabled
 * @crtc: crtc for the plane
 *
 * Disable @plane on @crtc, making sure that the pipe is running first.
 */
static void intel_disable_primary_hw_plane(struct drm_plane *plane,
					   struct drm_crtc *crtc)
{
	struct drm_device *dev = plane->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	assert_pipe_enabled(dev_priv, intel_crtc->pipe);

	if (!intel_crtc->primary_enabled)
		return;

	intel_crtc->primary_enabled = false;

	dev_priv->display.update_primary_plane(crtc, plane->fb,
					       crtc->x, crtc->y);
}

static bool need_vtd_wa(struct drm_device *dev)
{
#ifdef CONFIG_INTEL_IOMMU
	if (INTEL_INFO(dev)->gen >= 6 && intel_iommu_gfx_mapped)
		return true;
#endif
	return false;
}

static int intel_align_height(struct drm_device *dev, int height, bool tiled)
{
	int tile_height;

	tile_height = tiled ? (IS_GEN2(dev) ? 16 : 8) : 1;
	return ALIGN(height, tile_height);
}
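
/*
 * Worked example added for clarity (not in the original sources) for
 * intel_align_height(): with tiled == true the tile height is 16 rows on
 * gen2 and 8 rows elsewhere, so a 600-line framebuffer is padded to 608
 * lines on gen2 and stays at 600 (already a multiple of 8) on gen3+;
 * untiled buffers are not padded at all (tile_height == 1).
 */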

int
intel_pin_and_fence_fb_obj(struct drm_plane *plane,
			   struct drm_framebuffer *fb,
			   struct intel_engine_cs *pipelined)
{
	struct drm_device *dev = fb->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	u32 alignment;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	switch (obj->tiling_mode) {
	case I915_TILING_NONE:
		if (INTEL_INFO(dev)->gen >= 9)
			alignment = 256 * 1024;
		else if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
			alignment = 128 * 1024;
		else if (INTEL_INFO(dev)->gen >= 4)
			alignment = 4 * 1024;
		else
			alignment = 64 * 1024;
		break;
	case I915_TILING_X:
		if (INTEL_INFO(dev)->gen >= 9)
			alignment = 256 * 1024;
		else {
			/* pin() will align the object as required by fence */
			alignment = 0;
		}
		break;
	case I915_TILING_Y:
		WARN(1, "Y tiled bo slipped through, driver bug!\n");
		return -EINVAL;
	default:
		BUG();
	}

	/* Note that the w/a also requires 64 PTE of padding following the
	 * bo. We currently fill all unused PTE with the shadow page and so
	 * we should always have valid PTE following the scanout preventing
	 * the VT-d warning.
	 */
	if (need_vtd_wa(dev) && alignment < 256 * 1024)
		alignment = 256 * 1024;

	/*
	 * Global gtt pte registers are special registers which actually forward
	 * writes to a chunk of system memory. Which means that there is no risk
	 * that the register values disappear as soon as we call
	 * intel_runtime_pm_put(), so it is correct to wrap only the
	 * pin/unpin/fence and not more.
	 */
	intel_runtime_pm_get(dev_priv);

	dev_priv->mm.interruptible = false;
	ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
	if (ret)
		goto err_interruptible;

	/* Install a fence for tiled scan-out. Pre-i965 always needs a
	 * fence, whereas 965+ only requires a fence if using
	 * framebuffer compression.  For simplicity, we always install
	 * a fence as the cost is not that onerous.
	 */
	ret = i915_gem_object_get_fence(obj);
	if (ret)
		goto err_unpin;

	i915_gem_object_pin_fence(obj);

	dev_priv->mm.interruptible = true;
	intel_runtime_pm_put(dev_priv);
	return 0;

err_unpin:
	i915_gem_object_unpin_from_display_plane(obj);
err_interruptible:
	dev_priv->mm.interruptible = true;
	intel_runtime_pm_put(dev_priv);
	return ret;
}
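
/*
 * Note added for clarity (not in the original sources): the error unwind in
 * intel_pin_and_fence_fb_obj() mirrors the setup order. A failed fence
 * allocation unpins the object again (err_unpin), and both error paths
 * restore mm.interruptible and drop the runtime PM reference taken at the
 * start, so callers only ever see a fully pinned and fenced object or none.
 */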

void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
{
	WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));

	i915_gem_object_unpin_fence(obj);
//	i915_gem_object_unpin_from_display_plane(obj);
}

/* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
 * is assumed to be a power-of-two. */
unsigned long intel_gen4_compute_page_offset(int *x, int *y,
					     unsigned int tiling_mode,
					     unsigned int cpp,
					     unsigned int pitch)
{
	if (tiling_mode != I915_TILING_NONE) {
		unsigned int tile_rows, tiles;

		tile_rows = *y / 8;
		*y %= 8;

		tiles = *x / (512/cpp);
		*x %= 512/cpp;

		return tile_rows * pitch * 8 + tiles * 4096;
	} else {
		unsigned int offset;

		offset = *y * pitch + *x * cpp;
		*y = 0;
		*x = (offset & 4095) / cpp;
		return offset & -4096;
	}
}
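
/*
 * Worked example added for clarity (not in the original sources) for
 * intel_gen4_compute_page_offset() with an X-tiled buffer, cpp = 4 and
 * pitch = 8192 bytes: for x = 130, y = 77 the function reduces y to 5
 * (77 % 8) and x to 2 (130 % 128) and returns 9 * 8192 * 8 + 1 * 4096 =
 * 593920, i.e. the byte offset of the 4 KiB tile containing that pixel.
 */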

int intel_format_to_fourcc(int format)
{
	switch (format) {
	case DISPPLANE_8BPP:
		return DRM_FORMAT_C8;
	case DISPPLANE_BGRX555:
		return DRM_FORMAT_XRGB1555;
	case DISPPLANE_BGRX565:
		return DRM_FORMAT_RGB565;
	default:
	case DISPPLANE_BGRX888:
		return DRM_FORMAT_XRGB8888;
	case DISPPLANE_RGBX888:
		return DRM_FORMAT_XBGR8888;
	case DISPPLANE_BGRX101010:
		return DRM_FORMAT_XRGB2101010;
	case DISPPLANE_RGBX101010:
		return DRM_FORMAT_XBGR2101010;
	}
}

static bool intel_alloc_plane_obj(struct intel_crtc *crtc,
				  struct intel_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_gem_object *obj = NULL;
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
	u32 base = plane_config->base;

	if (plane_config->size == 0)
		return false;

	obj = i915_gem_object_create_stolen_for_preallocated(dev, base, base,
							     plane_config->size);
	if (!obj)
		return false;

	main_fb_obj = obj;

	if (plane_config->tiled) {
		obj->tiling_mode = I915_TILING_X;
		obj->stride = crtc->base.primary->fb->pitches[0];
	}

	mode_cmd.pixel_format = crtc->base.primary->fb->pixel_format;
	mode_cmd.width = crtc->base.primary->fb->width;
	mode_cmd.height = crtc->base.primary->fb->height;
	mode_cmd.pitches[0] = crtc->base.primary->fb->pitches[0];

	mutex_lock(&dev->struct_mutex);

	if (intel_framebuffer_init(dev, to_intel_framebuffer(crtc->base.primary->fb),
				   &mode_cmd, obj)) {
		DRM_DEBUG_KMS("intel fb init failed\n");
		goto out_unref_obj;
	}

	obj->frontbuffer_bits = INTEL_FRONTBUFFER_PRIMARY(crtc->pipe);
	mutex_unlock(&dev->struct_mutex);

	DRM_DEBUG_KMS("plane fb obj %p\n", obj);
	return true;

out_unref_obj:
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);
	return false;
}

static void intel_find_plane_obj(struct intel_crtc *intel_crtc,
				 struct intel_plane_config *plane_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *c;
	struct intel_crtc *i;
	struct drm_i915_gem_object *obj;

	if (!intel_crtc->base.primary->fb)
		return;

	if (intel_alloc_plane_obj(intel_crtc, plane_config))
		return;

	kfree(intel_crtc->base.primary->fb);
	intel_crtc->base.primary->fb = NULL;

	/*
	 * Failed to alloc the obj, check to see if we should share
	 * an fb with another CRTC instead
	 */
	for_each_crtc(dev, c) {
		i = to_intel_crtc(c);

		if (c == &intel_crtc->base)
			continue;

		if (!i->active)
			continue;

		obj = intel_fb_obj(c->primary->fb);
		if (obj == NULL)
			continue;

		if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
			if (obj->tiling_mode != I915_TILING_NONE)
				dev_priv->preserve_bios_swizzle = true;

			drm_framebuffer_reference(c->primary->fb);
			intel_crtc->base.primary->fb = c->primary->fb;
			obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
			break;
		}
	}
}

static void i9xx_update_primary_plane(struct drm_crtc *crtc,
				      struct drm_framebuffer *fb,
				      int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_gem_object *obj;
	int plane = intel_crtc->plane;
	unsigned long linear_offset;
	u32 dspcntr;
	u32 reg = DSPCNTR(plane);
	int pixel_size;

	if (!intel_crtc->primary_enabled) {
		I915_WRITE(reg, 0);
		if (INTEL_INFO(dev)->gen >= 4)
			I915_WRITE(DSPSURF(plane), 0);
		else
			I915_WRITE(DSPADDR(plane), 0);
		POSTING_READ(reg);
		return;
	}

	obj = intel_fb_obj(fb);
	if (WARN_ON(obj == NULL))
		return;

	pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);

	dspcntr = DISPPLANE_GAMMA_ENABLE;

	dspcntr |= DISPLAY_PLANE_ENABLE;

	if (INTEL_INFO(dev)->gen < 4) {
		if (intel_crtc->pipe == PIPE_B)
			dspcntr |= DISPPLANE_SEL_PIPE_B;

		/* pipesrc and dspsize control the size that is scaled from,
		 * which should always be the user's requested size.
		 */
		I915_WRITE(DSPSIZE(plane),
			   ((intel_crtc->config.pipe_src_h - 1) << 16) |
			   (intel_crtc->config.pipe_src_w - 1));
		I915_WRITE(DSPPOS(plane), 0);
	} else if (IS_CHERRYVIEW(dev) && plane == PLANE_B) {
		I915_WRITE(PRIMSIZE(plane),
			   ((intel_crtc->config.pipe_src_h - 1) << 16) |
			   (intel_crtc->config.pipe_src_w - 1));
		I915_WRITE(PRIMPOS(plane), 0);
		I915_WRITE(PRIMCNSTALPHA(plane), 0);
	}

	switch (fb->pixel_format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_XRGB1555:
	case DRM_FORMAT_ARGB1555:
		dspcntr |= DISPPLANE_BGRX555;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	default:
		BUG();
	}

	if (INTEL_INFO(dev)->gen >= 4 &&
	    obj->tiling_mode != I915_TILING_NONE)
		dspcntr |= DISPPLANE_TILED;

	if (IS_G4X(dev))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	linear_offset = y * fb->pitches[0] + x * pixel_size;

	if (INTEL_INFO(dev)->gen >= 4) {
		intel_crtc->dspaddr_offset =
			intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
						       pixel_size,
						       fb->pitches[0]);
		linear_offset -= intel_crtc->dspaddr_offset;
	} else {
		intel_crtc->dspaddr_offset = linear_offset;
	}

	if (to_intel_plane(crtc->primary)->rotation == BIT(DRM_ROTATE_180)) {
		dspcntr |= DISPPLANE_ROTATE_180;

		x += (intel_crtc->config.pipe_src_w - 1);
		y += (intel_crtc->config.pipe_src_h - 1);

		/*
		 * Finding the last pixel of the last line of the display
		 * data and adding to linear_offset.
		 */
		linear_offset +=
			(intel_crtc->config.pipe_src_h - 1) * fb->pitches[0] +
			(intel_crtc->config.pipe_src_w - 1) * pixel_size;
	}

	I915_WRITE(reg, dspcntr);

	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
		      i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
		      fb->pitches[0]);
	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	if (INTEL_INFO(dev)->gen >= 4) {
		I915_WRITE(DSPSURF(plane),
			   i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
		I915_WRITE(DSPLINOFF(plane), linear_offset);
	} else
		I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
	POSTING_READ(reg);
}
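
/*
 * Note added for clarity (not in the original sources): the ILK+ variant
 * below differs from the i9xx path mainly in that HSW/BDW get the pipe CSC
 * enable bit, DSPSURF is always used for the surface address (DSPOFFSET on
 * HSW/BDW, DSPTILEOFF/DSPLINOFF otherwise), and the manual x/y adjustment
 * for 180 degree rotation is skipped on HSW/BDW.
 */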
2516
 
2562
 
2517
static void ironlake_update_primary_plane(struct drm_crtc *crtc,
2563
static void ironlake_update_primary_plane(struct drm_crtc *crtc,
2518
					 struct drm_framebuffer *fb,
2564
					 struct drm_framebuffer *fb,
2519
					 int x, int y)
2565
					 int x, int y)
2520
{
2566
{
2521
    struct drm_device *dev = crtc->dev;
2567
    struct drm_device *dev = crtc->dev;
2522
    struct drm_i915_private *dev_priv = dev->dev_private;
2568
    struct drm_i915_private *dev_priv = dev->dev_private;
2523
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2569
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2524
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2570
	struct drm_i915_gem_object *obj;
2525
    int plane = intel_crtc->plane;
2571
    int plane = intel_crtc->plane;
2526
	unsigned long linear_offset;
2572
	unsigned long linear_offset;
2527
    u32 dspcntr;
2573
    u32 dspcntr;
-
 
2574
	u32 reg = DSPCNTR(plane);
-
 
2575
	int pixel_size;
-
 
2576
 
-
 
2577
	if (!intel_crtc->primary_enabled) {
-
 
2578
		I915_WRITE(reg, 0);
-
 
2579
		I915_WRITE(DSPSURF(plane), 0);
-
 
2580
		POSTING_READ(reg);
2528
    u32 reg;
2581
		return;
-
 
2582
	}
-
 
2583
 
-
 
2584
	obj = intel_fb_obj(fb);
-
 
2585
	if (WARN_ON(obj == NULL))
-
 
2586
		return;
-
 
2587
 
-
 
2588
	pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
-
 
2589
 
-
 
2590
	dspcntr = DISPPLANE_GAMMA_ENABLE;
-
 
2591
 
-
 
2592
	dspcntr |= DISPLAY_PLANE_ENABLE;
-
 
2593
 
-
 
2594
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
-
 
2595
		dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
2529
 
-
 
2530
    reg = DSPCNTR(plane);
-
 
2531
    dspcntr = I915_READ(reg);
-
 
2532
    /* Mask out pixel format bits in case we change it */
-
 
2533
    dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
2596
 
2534
	switch (fb->pixel_format) {
2597
	switch (fb->pixel_format) {
2535
	case DRM_FORMAT_C8:
2598
	case DRM_FORMAT_C8:
2536
        dspcntr |= DISPPLANE_8BPP;
2599
        dspcntr |= DISPPLANE_8BPP;
2537
        break;
2600
        break;
2538
	case DRM_FORMAT_RGB565:
2601
	case DRM_FORMAT_RGB565:
2539
		dspcntr |= DISPPLANE_BGRX565;
2602
		dspcntr |= DISPPLANE_BGRX565;
2540
        break;
2603
        break;
2541
	case DRM_FORMAT_XRGB8888:
2604
	case DRM_FORMAT_XRGB8888:
2542
	case DRM_FORMAT_ARGB8888:
2605
	case DRM_FORMAT_ARGB8888:
2543
		dspcntr |= DISPPLANE_BGRX888;
2606
		dspcntr |= DISPPLANE_BGRX888;
2544
		break;
2607
		break;
2545
	case DRM_FORMAT_XBGR8888:
2608
	case DRM_FORMAT_XBGR8888:
2546
	case DRM_FORMAT_ABGR8888:
2609
	case DRM_FORMAT_ABGR8888:
2547
		dspcntr |= DISPPLANE_RGBX888;
2610
		dspcntr |= DISPPLANE_RGBX888;
2548
		break;
2611
		break;
2549
	case DRM_FORMAT_XRGB2101010:
2612
	case DRM_FORMAT_XRGB2101010:
2550
	case DRM_FORMAT_ARGB2101010:
2613
	case DRM_FORMAT_ARGB2101010:
2551
		dspcntr |= DISPPLANE_BGRX101010;
2614
		dspcntr |= DISPPLANE_BGRX101010;
2552
		break;
2615
		break;
2553
	case DRM_FORMAT_XBGR2101010:
2616
	case DRM_FORMAT_XBGR2101010:
2554
	case DRM_FORMAT_ABGR2101010:
2617
	case DRM_FORMAT_ABGR2101010:
2555
		dspcntr |= DISPPLANE_RGBX101010;
2618
		dspcntr |= DISPPLANE_RGBX101010;
2556
        break;
2619
        break;
2557
    default:
2620
    default:
2558
		BUG();
2621
		BUG();
2559
    }
2622
    }
2560
 
2623
 
2561
	if (obj->tiling_mode != I915_TILING_NONE)
2624
	if (obj->tiling_mode != I915_TILING_NONE)
2562
		dspcntr |= DISPPLANE_TILED;
2625
		dspcntr |= DISPPLANE_TILED;
2563
	else
-
 
2564
        dspcntr &= ~DISPPLANE_TILED;
-
 
2565
 
2626
 
2566
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
-
 
2567
		dspcntr &= ~DISPPLANE_TRICKLE_FEED_DISABLE;
-
 
2568
	else
2627
	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
2569
    dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
-
 
2570
 
-
 
2571
    I915_WRITE(reg, dspcntr);
2628
    dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
2572
 
2629
 
2573
	linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
2630
	linear_offset = y * fb->pitches[0] + x * pixel_size;
2574
	intel_crtc->dspaddr_offset =
2631
	intel_crtc->dspaddr_offset =
2575
		intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
2632
		intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
2576
						   fb->bits_per_pixel / 8,
2633
					       pixel_size,
-
 
2634
						   fb->pitches[0]);
-
 
2635
	linear_offset -= intel_crtc->dspaddr_offset;
-
 
2636
	if (to_intel_plane(crtc->primary)->rotation == BIT(DRM_ROTATE_180)) {
-
 
2637
		dspcntr |= DISPPLANE_ROTATE_180;
-
 
2638
 
-
 
2639
		if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
-
 
2640
			x += (intel_crtc->config.pipe_src_w - 1);
-
 
2641
			y += (intel_crtc->config.pipe_src_h - 1);
-
 
2642
 
-
 
2643
			/* Finding the last pixel of the last line of the display
-
 
2644
			data and adding to linear_offset */
-
 
2645
			linear_offset +=
-
 
2646
				(intel_crtc->config.pipe_src_h - 1) * fb->pitches[0] +
-
 
2647
				(intel_crtc->config.pipe_src_w - 1) * pixel_size;
-
 
2648
		}
-
 
2649
	}
2577
						   fb->pitches[0]);
2650
 
2578
	linear_offset -= intel_crtc->dspaddr_offset;
2651
	I915_WRITE(reg, dspcntr);
2579
 
2652
 
2580
	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
2653
	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
2581
		      i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
2654
		      i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
2582
		      fb->pitches[0]);
2655
		      fb->pitches[0]);
2583
	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2656
	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2584
	I915_WRITE(DSPSURF(plane),
2657
	I915_WRITE(DSPSURF(plane),
2585
			     i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
2658
			     i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
2586
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2659
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2587
		I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
2660
		I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
2588
	} else {
2661
	} else {
2589
	I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2662
	I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2590
	I915_WRITE(DSPLINOFF(plane), linear_offset);
2663
	I915_WRITE(DSPLINOFF(plane), linear_offset);
2591
	}
2664
	}
2592
	POSTING_READ(reg);
2665
	POSTING_READ(reg);
2593
}
2666
}
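
The panning math in the plane update above is plain byte arithmetic: the (x, y) origin becomes y * pitch + x * cpp, and for 180-degree rotation on parts other than Haswell/Broadwell the origin is moved to the last pixel of the last visible line before programming DSPLINOFF. The standalone sketch below mirrors only that arithmetic; the struct, field names and sample values are illustrative, not the driver's types.

#include <stdio.h>

/* Illustrative only: mirrors the linear_offset math in the plane update above. */
struct fb_params {
    unsigned int pitch;   /* bytes per scanline (fb->pitches[0]) */
    unsigned int cpp;     /* bytes per pixel (pixel_size)        */
    unsigned int src_w;   /* pipe_src_w                          */
    unsigned int src_h;   /* pipe_src_h                          */
};

static unsigned long linear_offset(const struct fb_params *fb,
                                   int x, int y, int rotate_180)
{
    unsigned long off = (unsigned long)y * fb->pitch + (unsigned long)x * fb->cpp;

    if (rotate_180) {
        /* Point at the last pixel of the last line, as in the
         * DISPPLANE_ROTATE_180 branch above. */
        off += (unsigned long)(fb->src_h - 1) * fb->pitch +
               (unsigned long)(fb->src_w - 1) * fb->cpp;
    }
    return off;
}

int main(void)
{
    struct fb_params fb = { .pitch = 1920 * 4, .cpp = 4,
                            .src_w = 1920, .src_h = 1080 };
    printf("offset(0,0)        = %lu\n", linear_offset(&fb, 0, 0, 0));
    printf("offset(0,0) rot180 = %lu\n", linear_offset(&fb, 0, 0, 1));
    return 0;
}
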
-
 
2667
 
-
 
2668
static void skylake_update_primary_plane(struct drm_crtc *crtc,
-
 
2669
					 struct drm_framebuffer *fb,
-
 
2670
					 int x, int y)
-
 
2671
{
-
 
2672
	struct drm_device *dev = crtc->dev;
-
 
2673
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
2674
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
 
2675
	struct intel_framebuffer *intel_fb;
-
 
2676
	struct drm_i915_gem_object *obj;
-
 
2677
	int pipe = intel_crtc->pipe;
-
 
2678
	u32 plane_ctl, stride;
-
 
2679
 
-
 
2680
	if (!intel_crtc->primary_enabled) {
-
 
2681
		I915_WRITE(PLANE_CTL(pipe, 0), 0);
-
 
2682
		I915_WRITE(PLANE_SURF(pipe, 0), 0);
-
 
2683
		POSTING_READ(PLANE_CTL(pipe, 0));
-
 
2684
		return;
-
 
2685
	}
-
 
2686
 
-
 
2687
	plane_ctl = PLANE_CTL_ENABLE |
-
 
2688
		    PLANE_CTL_PIPE_GAMMA_ENABLE |
-
 
2689
		    PLANE_CTL_PIPE_CSC_ENABLE;
-
 
2690
 
-
 
2691
	switch (fb->pixel_format) {
-
 
2692
	case DRM_FORMAT_RGB565:
-
 
2693
		plane_ctl |= PLANE_CTL_FORMAT_RGB_565;
-
 
2694
		break;
-
 
2695
	case DRM_FORMAT_XRGB8888:
-
 
2696
		plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
-
 
2697
		break;
-
 
2698
	case DRM_FORMAT_XBGR8888:
-
 
2699
		plane_ctl |= PLANE_CTL_ORDER_RGBX;
-
 
2700
		plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
-
 
2701
		break;
-
 
2702
	case DRM_FORMAT_XRGB2101010:
-
 
2703
		plane_ctl |= PLANE_CTL_FORMAT_XRGB_2101010;
-
 
2704
		break;
-
 
2705
	case DRM_FORMAT_XBGR2101010:
-
 
2706
		plane_ctl |= PLANE_CTL_ORDER_RGBX;
-
 
2707
		plane_ctl |= PLANE_CTL_FORMAT_XRGB_2101010;
-
 
2708
		break;
-
 
2709
	default:
-
 
2710
		BUG();
-
 
2711
	}
-
 
2712
 
-
 
2713
	intel_fb = to_intel_framebuffer(fb);
-
 
2714
	obj = intel_fb->obj;
-
 
2715
 
-
 
2716
	/*
-
 
2717
	 * The stride is either expressed as a multiple of 64-byte chunks for
-
 
2718
	 * linear buffers or in number of tiles for tiled buffers.
-
 
2719
	 */
-
 
2720
	switch (obj->tiling_mode) {
-
 
2721
	case I915_TILING_NONE:
-
 
2722
		stride = fb->pitches[0] >> 6;
-
 
2723
		break;
-
 
2724
	case I915_TILING_X:
-
 
2725
		plane_ctl |= PLANE_CTL_TILED_X;
-
 
2726
		stride = fb->pitches[0] >> 9;
-
 
2727
		break;
-
 
2728
	default:
-
 
2729
		BUG();
-
 
2730
	}
-
 
2731
 
-
 
2732
	plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
-
 
2733
	if (to_intel_plane(crtc->primary)->rotation == BIT(DRM_ROTATE_180))
-
 
2734
		plane_ctl |= PLANE_CTL_ROTATE_180;
-
 
2735
 
-
 
2736
	I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
-
 
2737
 
-
 
2738
	DRM_DEBUG_KMS("Writing base %08lX %d,%d,%d,%d pitch=%d\n",
-
 
2739
		      i915_gem_obj_ggtt_offset(obj),
-
 
2740
		      x, y, fb->width, fb->height,
-
 
2741
		      fb->pitches[0]);
-
 
2742
 
-
 
2743
	I915_WRITE(PLANE_POS(pipe, 0), 0);
-
 
2744
	I915_WRITE(PLANE_OFFSET(pipe, 0), (y << 16) | x);
-
 
2745
	I915_WRITE(PLANE_SIZE(pipe, 0),
-
 
2746
		   (intel_crtc->config.pipe_src_h - 1) << 16 |
-
 
2747
		   (intel_crtc->config.pipe_src_w - 1));
-
 
2748
	I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
-
 
2749
	I915_WRITE(PLANE_SURF(pipe, 0), i915_gem_obj_ggtt_offset(obj));
-
 
2750
 
-
 
2751
	POSTING_READ(PLANE_SURF(pipe, 0));
-
 
2752
}
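
The PLANE_STRIDE value programmed above encodes the stride in units that depend on tiling: for linear buffers the byte pitch is counted in 64-byte chunks (pitch >> 6), for X-tiled buffers in 512-byte tile widths (pitch >> 9). A small hedged sketch of that conversion, outside the driver; the enum and sample pitch are stand-ins, not driver definitions.

#include <stdio.h>
#include <stdint.h>

enum tiling { TILING_NONE, TILING_X };   /* stand-ins for I915_TILING_* */

/* Mirrors the stride computation in skylake_update_primary_plane above:
 * linear strides are in 64-byte units, X-tiled strides in 512-byte tiles. */
static uint32_t plane_stride(uint32_t pitch_bytes, enum tiling t)
{
    switch (t) {
    case TILING_NONE: return pitch_bytes >> 6;
    case TILING_X:    return pitch_bytes >> 9;
    }
    return 0;
}

int main(void)
{
    uint32_t pitch = 1920 * 4;  /* illustrative 1920-pixel XRGB8888 pitch */
    printf("linear stride  = %u (64-byte chunks)\n", plane_stride(pitch, TILING_NONE));
    printf("X-tiled stride = %u (512-byte tiles)\n", plane_stride(pitch, TILING_X));
    return 0;
}
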
2594
 
2753
 
2595
/* Assume fb object is pinned & idle & fenced and just update base pointers */
2754
/* Assume fb object is pinned & idle & fenced and just update base pointers */
2596
static int
2755
static int
2597
intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2756
intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2598
			   int x, int y, enum mode_set_atomic state)
2757
			   int x, int y, enum mode_set_atomic state)
2599
{
2758
{
2600
	struct drm_device *dev = crtc->dev;
2759
	struct drm_device *dev = crtc->dev;
2601
	struct drm_i915_private *dev_priv = dev->dev_private;
2760
	struct drm_i915_private *dev_priv = dev->dev_private;
2602
 
2761
 
2603
	if (dev_priv->display.disable_fbc)
2762
	if (dev_priv->display.disable_fbc)
2604
		dev_priv->display.disable_fbc(dev);
2763
		dev_priv->display.disable_fbc(dev);
2605
	intel_increase_pllclock(dev, to_intel_crtc(crtc)->pipe);
-
 
2606
 
2764
 
2607
	dev_priv->display.update_primary_plane(crtc, fb, x, y);
2765
	dev_priv->display.update_primary_plane(crtc, fb, x, y);
2608
 
2766
 
2609
	return 0;
2767
	return 0;
2610
}
2768
}
2611
 
2769
 
2612
#if 0
2770
#if 0
2613
void intel_display_handle_reset(struct drm_device *dev)
2771
static void intel_complete_page_flips(struct drm_device *dev)
2614
{
2772
{
2615
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
2616
	struct drm_crtc *crtc;
2773
	struct drm_crtc *crtc;
2617
 
-
 
2618
	/*
-
 
2619
	 * Flips in the rings have been nuked by the reset,
-
 
2620
	 * so complete all pending flips so that user space
-
 
2621
	 * will get its events and not get stuck.
-
 
2622
	 *
-
 
2623
	 * Also update the base address of all primary
-
 
2624
	 * planes to the last fb to make sure we're
-
 
2625
	 * showing the correct fb after a reset.
-
 
2626
	 *
-
 
2627
	 * Need to make two loops over the crtcs so that we
-
 
2628
	 * don't try to grab a crtc mutex before the
-
 
2629
	 * pending_flip_queue really got woken up.
-
 
2630
	 */
-
 
2631
 
2774
 
2632
	for_each_crtc(dev, crtc) {
2775
	for_each_crtc(dev, crtc) {
2633
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2776
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2634
		enum plane plane = intel_crtc->plane;
2777
		enum plane plane = intel_crtc->plane;
2635
 
2778
 
2636
		intel_prepare_page_flip(dev, plane);
2779
		intel_prepare_page_flip(dev, plane);
2637
		intel_finish_page_flip_plane(dev, plane);
2780
		intel_finish_page_flip_plane(dev, plane);
2638
	}
2781
	}
-
 
2782
}
-
 
2783
 
-
 
2784
static void intel_update_primary_planes(struct drm_device *dev)
-
 
2785
{
-
 
2786
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
2787
	struct drm_crtc *crtc;
2639
 
2788
 
2640
	for_each_crtc(dev, crtc) {
2789
	for_each_crtc(dev, crtc) {
2641
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2790
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2642
 
2791
 
2643
		drm_modeset_lock(&crtc->mutex, NULL);
2792
		drm_modeset_lock(&crtc->mutex, NULL);
2644
		/*
2793
		/*
2645
		 * FIXME: Once we have proper support for primary planes (and
2794
		 * FIXME: Once we have proper support for primary planes (and
2646
		 * disabling them without disabling the entire crtc) allow again
2795
		 * disabling them without disabling the entire crtc) allow again
2647
		 * a NULL crtc->primary->fb.
2796
		 * a NULL crtc->primary->fb.
2648
		 */
2797
		 */
2649
		if (intel_crtc->active && crtc->primary->fb)
2798
		if (intel_crtc->active && crtc->primary->fb)
2650
			dev_priv->display.update_primary_plane(crtc,
2799
			dev_priv->display.update_primary_plane(crtc,
2651
							       crtc->primary->fb,
2800
							       crtc->primary->fb,
2652
							       crtc->x,
2801
							       crtc->x,
2653
							       crtc->y);
2802
							       crtc->y);
2654
		drm_modeset_unlock(&crtc->mutex);
2803
		drm_modeset_unlock(&crtc->mutex);
2655
	}
2804
	}
2656
}
2805
}
-
 
2806
 
-
 
2807
void intel_prepare_reset(struct drm_device *dev)
-
 
2808
{
-
 
2809
	struct drm_i915_private *dev_priv = to_i915(dev);
-
 
2810
	struct intel_crtc *crtc;
-
 
2811
 
-
 
2812
	/* no reset support for gen2 */
-
 
2813
	if (IS_GEN2(dev))
-
 
2814
		return;
-
 
2815
 
-
 
2816
	/* reset doesn't touch the display */
-
 
2817
	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
-
 
2818
		return;
-
 
2819
 
-
 
2820
	drm_modeset_lock_all(dev);
-
 
2821
 
-
 
2822
	/*
-
 
2823
	 * Disabling the crtcs gracefully seems nicer. Also the
-
 
2824
	 * g33 docs say we should at least disable all the planes.
-
 
2825
	 */
-
 
2826
	for_each_intel_crtc(dev, crtc) {
-
 
2827
		if (crtc->active)
-
 
2828
			dev_priv->display.crtc_disable(&crtc->base);
-
 
2829
	}
-
 
2830
}
-
 
2831
 
-
 
2832
void intel_finish_reset(struct drm_device *dev)
-
 
2833
{
-
 
2834
	struct drm_i915_private *dev_priv = to_i915(dev);
-
 
2835
 
-
 
2836
	/*
-
 
2837
	 * Flips in the rings will be nuked by the reset,
-
 
2838
	 * so complete all pending flips so that user space
-
 
2839
	 * will get its events and not get stuck.
-
 
2840
	 */
-
 
2841
	intel_complete_page_flips(dev);
-
 
2842
 
-
 
2843
	/* no reset support for gen2 */
-
 
2844
	if (IS_GEN2(dev))
-
 
2845
		return;
-
 
2846
 
-
 
2847
	/* reset doesn't touch the display */
-
 
2848
	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) {
-
 
2849
		/*
-
 
2850
		 * Flips in the rings have been nuked by the reset,
-
 
2851
		 * so update the base address of all primary
-
 
2852
		 * planes to the the last fb to make sure we're
-
 
2853
		 * showing the correct fb after a reset.
-
 
2854
		 */
-
 
2855
		intel_update_primary_planes(dev);
-
 
2856
		return;
-
 
2857
	}
-
 
2858
 
-
 
2859
	/*
-
 
2860
	 * The display has been reset as well,
-
 
2861
	 * so need a full re-initialization.
-
 
2862
	 */
-
 
2863
	intel_runtime_pm_disable_interrupts(dev_priv);
-
 
2864
	intel_runtime_pm_enable_interrupts(dev_priv);
-
 
2865
 
-
 
2866
	intel_modeset_init_hw(dev);
-
 
2867
 
-
 
2868
	spin_lock_irq(&dev_priv->irq_lock);
-
 
2869
	if (dev_priv->display.hpd_irq_setup)
-
 
2870
		dev_priv->display.hpd_irq_setup(dev);
-
 
2871
	spin_unlock_irq(&dev_priv->irq_lock);
-
 
2872
 
-
 
2873
	intel_modeset_setup_hw_state(dev, true);
-
 
2874
 
-
 
2875
	intel_hpd_init(dev_priv);
-
 
2876
 
-
 
2877
	drm_modeset_unlock_all(dev);
-
 
2878
}
2657
 
2879
 
2658
static int
2880
static int
2659
intel_finish_fb(struct drm_framebuffer *old_fb)
2881
intel_finish_fb(struct drm_framebuffer *old_fb)
2660
{
2882
{
2661
	struct drm_i915_gem_object *obj = intel_fb_obj(old_fb);
2883
	struct drm_i915_gem_object *obj = intel_fb_obj(old_fb);
2662
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2884
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2663
	bool was_interruptible = dev_priv->mm.interruptible;
2885
	bool was_interruptible = dev_priv->mm.interruptible;
2664
	int ret;
2886
	int ret;
2665
 
2887
 
2666
	/* Big Hammer, we also need to ensure that any pending
2888
	/* Big Hammer, we also need to ensure that any pending
2667
	 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
2889
	 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
2668
	 * current scanout is retired before unpinning the old
2890
	 * current scanout is retired before unpinning the old
2669
	 * framebuffer.
2891
	 * framebuffer.
2670
	 *
2892
	 *
2671
	 * This should only fail upon a hung GPU, in which case we
2893
	 * This should only fail upon a hung GPU, in which case we
2672
	 * can safely continue.
2894
	 * can safely continue.
2673
	 */
2895
	 */
2674
	dev_priv->mm.interruptible = false;
2896
	dev_priv->mm.interruptible = false;
2675
	ret = i915_gem_object_finish_gpu(obj);
2897
	ret = i915_gem_object_finish_gpu(obj);
2676
	dev_priv->mm.interruptible = was_interruptible;
2898
	dev_priv->mm.interruptible = was_interruptible;
2677
 
2899
 
2678
	return ret;
2900
	return ret;
2679
}
2901
}
2680
 
2902
 
2681
static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
2903
static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
2682
{
2904
{
2683
	struct drm_device *dev = crtc->dev;
2905
	struct drm_device *dev = crtc->dev;
2684
	struct drm_i915_private *dev_priv = dev->dev_private;
2906
	struct drm_i915_private *dev_priv = dev->dev_private;
2685
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2907
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2686
	unsigned long flags;
-
 
2687
	bool pending;
2908
	bool pending;
2688
 
2909
 
2689
	if (i915_reset_in_progress(&dev_priv->gpu_error) ||
2910
	if (i915_reset_in_progress(&dev_priv->gpu_error) ||
2690
	    intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
2911
	    intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
2691
		return false;
2912
		return false;
2692
 
2913
 
2693
	spin_lock_irqsave(&dev->event_lock, flags);
2914
	spin_lock_irq(&dev->event_lock);
2694
	pending = to_intel_crtc(crtc)->unpin_work != NULL;
2915
	pending = to_intel_crtc(crtc)->unpin_work != NULL;
2695
	spin_unlock_irqrestore(&dev->event_lock, flags);
2916
	spin_unlock_irq(&dev->event_lock);
2696
 
2917
 
2697
	return pending;
2918
	return pending;
2698
}
2919
}
2699
#endif
2920
#endif
-
 
2921
 
-
 
2922
static void intel_update_pipe_size(struct intel_crtc *crtc)
-
 
2923
{
-
 
2924
	struct drm_device *dev = crtc->base.dev;
-
 
2925
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
2926
	const struct drm_display_mode *adjusted_mode;
-
 
2927
 
-
 
2928
	if (!i915.fastboot)
-
 
2929
		return;
-
 
2930
 
-
 
2931
	/*
-
 
2932
	 * Update pipe size and adjust fitter if needed: the reason for this is
-
 
2933
	 * that in compute_mode_changes we check the native mode (not the pfit
-
 
2934
	 * mode) to see if we can flip rather than do a full mode set. In the
-
 
2935
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
-
 
2936
	 * pfit state, we'll end up with a big fb scanned out into the wrong
-
 
2937
	 * sized surface.
-
 
2938
	 *
-
 
2939
	 * To fix this properly, we need to hoist the checks up into
-
 
2940
	 * compute_mode_changes (or above), check the actual pfit state and
-
 
2941
	 * whether the platform allows pfit disable with pipe active, and only
-
 
2942
	 * then update the pipesrc and pfit state, even on the flip path.
-
 
2943
	 */
-
 
2944
 
-
 
2945
	adjusted_mode = &crtc->config.adjusted_mode;
-
 
2946
 
-
 
2947
	I915_WRITE(PIPESRC(crtc->pipe),
-
 
2948
		   ((adjusted_mode->crtc_hdisplay - 1) << 16) |
-
 
2949
		   (adjusted_mode->crtc_vdisplay - 1));
-
 
2950
	if (!crtc->config.pch_pfit.enabled &&
-
 
2951
	    (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
-
 
2952
	     intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
-
 
2953
		I915_WRITE(PF_CTL(crtc->pipe), 0);
-
 
2954
		I915_WRITE(PF_WIN_POS(crtc->pipe), 0);
-
 
2955
		I915_WRITE(PF_WIN_SZ(crtc->pipe), 0);
-
 
2956
	}
-
 
2957
	crtc->config.pipe_src_w = adjusted_mode->crtc_hdisplay;
-
 
2958
	crtc->config.pipe_src_h = adjusted_mode->crtc_vdisplay;
-
 
2959
}
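
PIPESRC is written above with the source size packed as ((hdisplay - 1) << 16) | (vdisplay - 1), i.e. minus-one values with the width in the high half-word. A self-contained illustration of that packing; the function name is a placeholder, not a register definition.

#include <stdio.h>
#include <stdint.h>

/* Pack a width/height pair the way PIPESRC is written above:
 * high 16 bits hold (width - 1), low 16 bits hold (height - 1). */
static uint32_t pack_pipesrc(uint32_t hdisplay, uint32_t vdisplay)
{
    return ((hdisplay - 1) << 16) | (vdisplay - 1);
}

int main(void)
{
    uint32_t v = pack_pipesrc(1920, 1080);
    printf("PIPESRC value: 0x%08x (w=%u, h=%u)\n",
           v, (v >> 16) + 1, (v & 0xffff) + 1);
    return 0;
}
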
2700
 
2960
 
2701
static int
2961
static int
2702
intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2962
intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2703
		    struct drm_framebuffer *fb)
2963
		    struct drm_framebuffer *fb)
2704
{
2964
{
2705
	struct drm_device *dev = crtc->dev;
2965
	struct drm_device *dev = crtc->dev;
2706
	struct drm_i915_private *dev_priv = dev->dev_private;
2966
	struct drm_i915_private *dev_priv = dev->dev_private;
2707
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2967
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2708
	enum pipe pipe = intel_crtc->pipe;
2968
	enum pipe pipe = intel_crtc->pipe;
2709
	struct drm_framebuffer *old_fb = crtc->primary->fb;
2969
	struct drm_framebuffer *old_fb = crtc->primary->fb;
2710
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
-
 
2711
	struct drm_i915_gem_object *old_obj = intel_fb_obj(old_fb);
2970
	struct drm_i915_gem_object *old_obj = intel_fb_obj(old_fb);
2712
	int ret;
2971
	int ret;
2713
 
2972
 
2714
 
2973
 
2715
	/* no fb bound */
2974
	/* no fb bound */
2716
	if (!fb) {
2975
	if (!fb) {
2717
		DRM_ERROR("No FB bound\n");
2976
		DRM_ERROR("No FB bound\n");
2718
		return 0;
2977
		return 0;
2719
	}
2978
	}
2720
 
2979
 
2721
	if (intel_crtc->plane > INTEL_INFO(dev)->num_pipes) {
2980
	if (intel_crtc->plane > INTEL_INFO(dev)->num_pipes) {
2722
		DRM_ERROR("no plane for crtc: plane %c, num_pipes %d\n",
2981
		DRM_ERROR("no plane for crtc: plane %c, num_pipes %d\n",
2723
			  plane_name(intel_crtc->plane),
2982
			  plane_name(intel_crtc->plane),
2724
				INTEL_INFO(dev)->num_pipes);
2983
				INTEL_INFO(dev)->num_pipes);
2725
		return -EINVAL;
2984
		return -EINVAL;
2726
	}
2985
	}
2727
 
2986
 
2728
	mutex_lock(&dev->struct_mutex);
2987
	mutex_lock(&dev->struct_mutex);
2729
	ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
2988
	ret = intel_pin_and_fence_fb_obj(crtc->primary, fb, NULL);
2730
	if (ret == 0)
2989
	if (ret == 0)
2731
		i915_gem_track_fb(old_obj, obj,
2990
		i915_gem_track_fb(old_obj, intel_fb_obj(fb),
2732
				  INTEL_FRONTBUFFER_PRIMARY(pipe));
2991
				  INTEL_FRONTBUFFER_PRIMARY(pipe));
2733
	mutex_unlock(&dev->struct_mutex);
2992
	mutex_unlock(&dev->struct_mutex);
2734
    if (ret != 0) {
2993
    if (ret != 0) {
2735
       DRM_ERROR("pin & fence failed\n");
2994
       DRM_ERROR("pin & fence failed\n");
2736
       return ret;
2995
       return ret;
2737
    }
2996
    }
2738
 
-
 
2739
	/*
-
 
2740
	 * Update pipe size and adjust fitter if needed: the reason for this is
-
 
2741
	 * that in compute_mode_changes we check the native mode (not the pfit
-
 
2742
	 * mode) to see if we can flip rather than do a full mode set. In the
-
 
2743
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
-
 
2744
	 * pfit state, we'll end up with a big fb scanned out into the wrong
-
 
2745
	 * sized surface.
-
 
2746
	 *
-
 
2747
	 * To fix this properly, we need to hoist the checks up into
-
 
2748
	 * compute_mode_changes (or above), check the actual pfit state and
-
 
2749
	 * whether the platform allows pfit disable with pipe active, and only
-
 
2750
	 * then update the pipesrc and pfit state, even on the flip path.
-
 
2751
	 */
-
 
2752
	if (i915.fastboot) {
-
 
2753
		const struct drm_display_mode *adjusted_mode =
-
 
2754
			&intel_crtc->config.adjusted_mode;
-
 
2755
 
-
 
2756
		I915_WRITE(PIPESRC(intel_crtc->pipe),
-
 
2757
			   ((adjusted_mode->crtc_hdisplay - 1) << 16) |
-
 
2758
			   (adjusted_mode->crtc_vdisplay - 1));
-
 
2759
		if (!intel_crtc->config.pch_pfit.enabled &&
-
 
2760
		    (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
-
 
2761
		     intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
-
 
2762
			I915_WRITE(PF_CTL(intel_crtc->pipe), 0);
-
 
2763
			I915_WRITE(PF_WIN_POS(intel_crtc->pipe), 0);
-
 
2764
			I915_WRITE(PF_WIN_SZ(intel_crtc->pipe), 0);
-
 
2765
		}
-
 
2766
		intel_crtc->config.pipe_src_w = adjusted_mode->crtc_hdisplay;
-
 
2767
		intel_crtc->config.pipe_src_h = adjusted_mode->crtc_vdisplay;
-
 
2768
	}
-
 
2769
 
2997
 
2770
	dev_priv->display.update_primary_plane(crtc, fb, x, y);
2998
	dev_priv->display.update_primary_plane(crtc, fb, x, y);
2771
 
2999
 
2772
	if (intel_crtc->active)
3000
	if (intel_crtc->active)
2773
		intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
3001
		intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
2774
 
3002
 
2775
	crtc->primary->fb = fb;
3003
	crtc->primary->fb = fb;
2776
	crtc->x = x;
3004
	crtc->x = x;
2777
	crtc->y = y;
3005
	crtc->y = y;
2778
 
3006
 
2779
	if (old_fb) {
3007
	if (old_fb) {
2780
		if (intel_crtc->active && old_fb != fb)
3008
		if (intel_crtc->active && old_fb != fb)
2781
		intel_wait_for_vblank(dev, intel_crtc->pipe);
3009
		intel_wait_for_vblank(dev, intel_crtc->pipe);
2782
		mutex_lock(&dev->struct_mutex);
3010
		mutex_lock(&dev->struct_mutex);
2783
		intel_unpin_fb_obj(old_obj);
3011
		intel_unpin_fb_obj(old_obj);
2784
		mutex_unlock(&dev->struct_mutex);
3012
		mutex_unlock(&dev->struct_mutex);
2785
	}
3013
	}
2786
 
3014
 
2787
	mutex_lock(&dev->struct_mutex);
3015
	mutex_lock(&dev->struct_mutex);
2788
	intel_update_fbc(dev);
3016
	intel_update_fbc(dev);
2789
	mutex_unlock(&dev->struct_mutex);
3017
	mutex_unlock(&dev->struct_mutex);
2790
 
3018
 
2791
    return 0;
3019
    return 0;
2792
}
3020
}
2793
 
3021
 
2794
static void intel_fdi_normal_train(struct drm_crtc *crtc)
3022
static void intel_fdi_normal_train(struct drm_crtc *crtc)
2795
{
3023
{
2796
	struct drm_device *dev = crtc->dev;
3024
	struct drm_device *dev = crtc->dev;
2797
	struct drm_i915_private *dev_priv = dev->dev_private;
3025
	struct drm_i915_private *dev_priv = dev->dev_private;
2798
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3026
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2799
	int pipe = intel_crtc->pipe;
3027
	int pipe = intel_crtc->pipe;
2800
	u32 reg, temp;
3028
	u32 reg, temp;
2801
 
3029
 
2802
	/* enable normal train */
3030
	/* enable normal train */
2803
	reg = FDI_TX_CTL(pipe);
3031
	reg = FDI_TX_CTL(pipe);
2804
	temp = I915_READ(reg);
3032
	temp = I915_READ(reg);
2805
	if (IS_IVYBRIDGE(dev)) {
3033
	if (IS_IVYBRIDGE(dev)) {
2806
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
3034
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
2807
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
3035
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
2808
	} else {
3036
	} else {
2809
		temp &= ~FDI_LINK_TRAIN_NONE;
3037
		temp &= ~FDI_LINK_TRAIN_NONE;
2810
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
3038
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
2811
	}
3039
	}
2812
	I915_WRITE(reg, temp);
3040
	I915_WRITE(reg, temp);
2813
 
3041
 
2814
	reg = FDI_RX_CTL(pipe);
3042
	reg = FDI_RX_CTL(pipe);
2815
	temp = I915_READ(reg);
3043
	temp = I915_READ(reg);
2816
	if (HAS_PCH_CPT(dev)) {
3044
	if (HAS_PCH_CPT(dev)) {
2817
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3045
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2818
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
3046
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
2819
	} else {
3047
	} else {
2820
		temp &= ~FDI_LINK_TRAIN_NONE;
3048
		temp &= ~FDI_LINK_TRAIN_NONE;
2821
		temp |= FDI_LINK_TRAIN_NONE;
3049
		temp |= FDI_LINK_TRAIN_NONE;
2822
	}
3050
	}
2823
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
3051
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
2824
 
3052
 
2825
	/* wait one idle pattern time */
3053
	/* wait one idle pattern time */
2826
	POSTING_READ(reg);
3054
	POSTING_READ(reg);
2827
	udelay(1000);
3055
	udelay(1000);
2828
 
3056
 
2829
	/* IVB wants error correction enabled */
3057
	/* IVB wants error correction enabled */
2830
	if (IS_IVYBRIDGE(dev))
3058
	if (IS_IVYBRIDGE(dev))
2831
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
3059
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
2832
			   FDI_FE_ERRC_ENABLE);
3060
			   FDI_FE_ERRC_ENABLE);
2833
}
3061
}
2834
 
3062
 
2835
static bool pipe_has_enabled_pch(struct intel_crtc *crtc)
3063
static bool pipe_has_enabled_pch(struct intel_crtc *crtc)
2836
{
3064
{
2837
	return crtc->base.enabled && crtc->active &&
3065
	return crtc->base.enabled && crtc->active &&
2838
		crtc->config.has_pch_encoder;
3066
		crtc->config.has_pch_encoder;
2839
}
3067
}
2840
 
3068
 
2841
static void ivb_modeset_global_resources(struct drm_device *dev)
3069
static void ivb_modeset_global_resources(struct drm_device *dev)
2842
{
3070
{
2843
	struct drm_i915_private *dev_priv = dev->dev_private;
3071
	struct drm_i915_private *dev_priv = dev->dev_private;
2844
	struct intel_crtc *pipe_B_crtc =
3072
	struct intel_crtc *pipe_B_crtc =
2845
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
3073
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
2846
	struct intel_crtc *pipe_C_crtc =
3074
	struct intel_crtc *pipe_C_crtc =
2847
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_C]);
3075
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_C]);
2848
	uint32_t temp;
3076
	uint32_t temp;
2849
 
3077
 
2850
	/*
3078
	/*
2851
	 * When everything is off disable fdi C so that we could enable fdi B
3079
	 * When everything is off disable fdi C so that we could enable fdi B
2852
	 * with all lanes. Note that we don't care about enabled pipes without
3080
	 * with all lanes. Note that we don't care about enabled pipes without
2853
	 * an enabled pch encoder.
3081
	 * an enabled pch encoder.
2854
	 */
3082
	 */
2855
	if (!pipe_has_enabled_pch(pipe_B_crtc) &&
3083
	if (!pipe_has_enabled_pch(pipe_B_crtc) &&
2856
	    !pipe_has_enabled_pch(pipe_C_crtc)) {
3084
	    !pipe_has_enabled_pch(pipe_C_crtc)) {
2857
		WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
3085
		WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
2858
		WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
3086
		WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
2859
 
3087
 
2860
		temp = I915_READ(SOUTH_CHICKEN1);
3088
		temp = I915_READ(SOUTH_CHICKEN1);
2861
		temp &= ~FDI_BC_BIFURCATION_SELECT;
3089
		temp &= ~FDI_BC_BIFURCATION_SELECT;
2862
		DRM_DEBUG_KMS("disabling fdi C rx\n");
3090
		DRM_DEBUG_KMS("disabling fdi C rx\n");
2863
		I915_WRITE(SOUTH_CHICKEN1, temp);
3091
		I915_WRITE(SOUTH_CHICKEN1, temp);
2864
	}
3092
	}
2865
}
3093
}
2866
 
3094
 
2867
/* The FDI link training functions for ILK/Ibexpeak. */
3095
/* The FDI link training functions for ILK/Ibexpeak. */
2868
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
3096
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
2869
{
3097
{
2870
    struct drm_device *dev = crtc->dev;
3098
    struct drm_device *dev = crtc->dev;
2871
    struct drm_i915_private *dev_priv = dev->dev_private;
3099
    struct drm_i915_private *dev_priv = dev->dev_private;
2872
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3100
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2873
    int pipe = intel_crtc->pipe;
3101
    int pipe = intel_crtc->pipe;
2874
    u32 reg, temp, tries;
3102
    u32 reg, temp, tries;
2875
 
3103
 
2876
	/* FDI needs bits from pipe first */
3104
	/* FDI needs bits from pipe first */
2877
    assert_pipe_enabled(dev_priv, pipe);
3105
    assert_pipe_enabled(dev_priv, pipe);
2878
 
3106
 
2879
    /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
3107
    /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
2880
       for train result */
3108
       for train result */
2881
    reg = FDI_RX_IMR(pipe);
3109
    reg = FDI_RX_IMR(pipe);
2882
    temp = I915_READ(reg);
3110
    temp = I915_READ(reg);
2883
    temp &= ~FDI_RX_SYMBOL_LOCK;
3111
    temp &= ~FDI_RX_SYMBOL_LOCK;
2884
    temp &= ~FDI_RX_BIT_LOCK;
3112
    temp &= ~FDI_RX_BIT_LOCK;
2885
    I915_WRITE(reg, temp);
3113
    I915_WRITE(reg, temp);
2886
    I915_READ(reg);
3114
    I915_READ(reg);
2887
    udelay(150);
3115
    udelay(150);
2888
 
3116
 
2889
    /* enable CPU FDI TX and PCH FDI RX */
3117
    /* enable CPU FDI TX and PCH FDI RX */
2890
    reg = FDI_TX_CTL(pipe);
3118
    reg = FDI_TX_CTL(pipe);
2891
    temp = I915_READ(reg);
3119
    temp = I915_READ(reg);
2892
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
3120
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
2893
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
3121
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
2894
    temp &= ~FDI_LINK_TRAIN_NONE;
3122
    temp &= ~FDI_LINK_TRAIN_NONE;
2895
    temp |= FDI_LINK_TRAIN_PATTERN_1;
3123
    temp |= FDI_LINK_TRAIN_PATTERN_1;
2896
    I915_WRITE(reg, temp | FDI_TX_ENABLE);
3124
    I915_WRITE(reg, temp | FDI_TX_ENABLE);
2897
 
3125
 
2898
    reg = FDI_RX_CTL(pipe);
3126
    reg = FDI_RX_CTL(pipe);
2899
    temp = I915_READ(reg);
3127
    temp = I915_READ(reg);
2900
    temp &= ~FDI_LINK_TRAIN_NONE;
3128
    temp &= ~FDI_LINK_TRAIN_NONE;
2901
    temp |= FDI_LINK_TRAIN_PATTERN_1;
3129
    temp |= FDI_LINK_TRAIN_PATTERN_1;
2902
    I915_WRITE(reg, temp | FDI_RX_ENABLE);
3130
    I915_WRITE(reg, temp | FDI_RX_ENABLE);
2903
 
3131
 
2904
    POSTING_READ(reg);
3132
    POSTING_READ(reg);
2905
    udelay(150);
3133
    udelay(150);
2906
 
3134
 
2907
    /* Ironlake workaround, enable clock pointer after FDI enable */
3135
    /* Ironlake workaround, enable clock pointer after FDI enable */
2908
        I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
3136
        I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
2909
        I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
3137
        I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
2910
               FDI_RX_PHASE_SYNC_POINTER_EN);
3138
               FDI_RX_PHASE_SYNC_POINTER_EN);
2911
 
3139
 
2912
    reg = FDI_RX_IIR(pipe);
3140
    reg = FDI_RX_IIR(pipe);
2913
    for (tries = 0; tries < 5; tries++) {
3141
    for (tries = 0; tries < 5; tries++) {
2914
        temp = I915_READ(reg);
3142
        temp = I915_READ(reg);
2915
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3143
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2916
 
3144
 
2917
        if ((temp & FDI_RX_BIT_LOCK)) {
3145
        if ((temp & FDI_RX_BIT_LOCK)) {
2918
            DRM_DEBUG_KMS("FDI train 1 done.\n");
3146
            DRM_DEBUG_KMS("FDI train 1 done.\n");
2919
            I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
3147
            I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2920
            break;
3148
            break;
2921
        }
3149
        }
2922
    }
3150
    }
2923
    if (tries == 5)
3151
    if (tries == 5)
2924
        DRM_ERROR("FDI train 1 fail!\n");
3152
        DRM_ERROR("FDI train 1 fail!\n");
2925
 
3153
 
2926
    /* Train 2 */
3154
    /* Train 2 */
2927
    reg = FDI_TX_CTL(pipe);
3155
    reg = FDI_TX_CTL(pipe);
2928
    temp = I915_READ(reg);
3156
    temp = I915_READ(reg);
2929
    temp &= ~FDI_LINK_TRAIN_NONE;
3157
    temp &= ~FDI_LINK_TRAIN_NONE;
2930
    temp |= FDI_LINK_TRAIN_PATTERN_2;
3158
    temp |= FDI_LINK_TRAIN_PATTERN_2;
2931
    I915_WRITE(reg, temp);
3159
    I915_WRITE(reg, temp);
2932
 
3160
 
2933
    reg = FDI_RX_CTL(pipe);
3161
    reg = FDI_RX_CTL(pipe);
2934
    temp = I915_READ(reg);
3162
    temp = I915_READ(reg);
2935
    temp &= ~FDI_LINK_TRAIN_NONE;
3163
    temp &= ~FDI_LINK_TRAIN_NONE;
2936
    temp |= FDI_LINK_TRAIN_PATTERN_2;
3164
    temp |= FDI_LINK_TRAIN_PATTERN_2;
2937
    I915_WRITE(reg, temp);
3165
    I915_WRITE(reg, temp);
2938
 
3166
 
2939
    POSTING_READ(reg);
3167
    POSTING_READ(reg);
2940
    udelay(150);
3168
    udelay(150);
2941
 
3169
 
2942
    reg = FDI_RX_IIR(pipe);
3170
    reg = FDI_RX_IIR(pipe);
2943
    for (tries = 0; tries < 5; tries++) {
3171
    for (tries = 0; tries < 5; tries++) {
2944
        temp = I915_READ(reg);
3172
        temp = I915_READ(reg);
2945
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3173
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2946
 
3174
 
2947
        if (temp & FDI_RX_SYMBOL_LOCK) {
3175
        if (temp & FDI_RX_SYMBOL_LOCK) {
2948
            I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
3176
            I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2949
            DRM_DEBUG_KMS("FDI train 2 done.\n");
3177
            DRM_DEBUG_KMS("FDI train 2 done.\n");
2950
            break;
3178
            break;
2951
        }
3179
        }
2952
    }
3180
    }
2953
    if (tries == 5)
3181
    if (tries == 5)
2954
        DRM_ERROR("FDI train 2 fail!\n");
3182
        DRM_ERROR("FDI train 2 fail!\n");
2955
 
3183
 
2956
    DRM_DEBUG_KMS("FDI train done\n");
3184
    DRM_DEBUG_KMS("FDI train done\n");
2957
 
3185
 
2958
}
3186
}
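
Both training stages above follow the same poll-and-acknowledge shape: read FDI_RX_IIR up to five times and, once the lock bit is set, write it back to clear the sticky status. A hedged, userspace-style sketch of that loop with a stubbed register; read_status, BIT_LOCK and the printed messages are illustrative, not driver API.

#include <stdio.h>
#include <stdint.h>

#define BIT_LOCK  (1u << 8)   /* placeholder for FDI_RX_BIT_LOCK */

/* Stub "register read": pretends the lock bit appears on the third read. */
static uint32_t read_status(void)
{
    static int calls;
    return (++calls >= 3) ? BIT_LOCK : 0;
}

/* Mirrors the train-1 polling loop above: up to five reads, then
 * acknowledge the sticky bit by writing it back (here just reported). */
static int wait_for_bit_lock(void)
{
    for (unsigned tries = 0; tries < 5; tries++) {
        uint32_t temp = read_status();
        printf("status 0x%x\n", temp);
        if (temp & BIT_LOCK) {
            printf("train 1 done, would write back 0x%x to clear\n",
                   temp | BIT_LOCK);
            return 0;
        }
    }
    printf("train 1 fail\n");
    return -1;
}

int main(void)
{
    return wait_for_bit_lock() ? 1 : 0;
}
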
2959
 
3187
 
2960
static const int snb_b_fdi_train_param[] = {
3188
static const int snb_b_fdi_train_param[] = {
2961
    FDI_LINK_TRAIN_400MV_0DB_SNB_B,
3189
    FDI_LINK_TRAIN_400MV_0DB_SNB_B,
2962
    FDI_LINK_TRAIN_400MV_6DB_SNB_B,
3190
    FDI_LINK_TRAIN_400MV_6DB_SNB_B,
2963
    FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
3191
    FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
2964
    FDI_LINK_TRAIN_800MV_0DB_SNB_B,
3192
    FDI_LINK_TRAIN_800MV_0DB_SNB_B,
2965
};
3193
};
2966
 
3194
 
2967
/* The FDI link training functions for SNB/Cougarpoint. */
3195
/* The FDI link training functions for SNB/Cougarpoint. */
2968
static void gen6_fdi_link_train(struct drm_crtc *crtc)
3196
static void gen6_fdi_link_train(struct drm_crtc *crtc)
2969
{
3197
{
2970
    struct drm_device *dev = crtc->dev;
3198
    struct drm_device *dev = crtc->dev;
2971
    struct drm_i915_private *dev_priv = dev->dev_private;
3199
    struct drm_i915_private *dev_priv = dev->dev_private;
2972
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3200
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2973
    int pipe = intel_crtc->pipe;
3201
    int pipe = intel_crtc->pipe;
2974
	u32 reg, temp, i, retry;
3202
	u32 reg, temp, i, retry;
2975
 
3203
 
2976
    /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
3204
    /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
2977
       for train result */
3205
       for train result */
2978
    reg = FDI_RX_IMR(pipe);
3206
    reg = FDI_RX_IMR(pipe);
2979
    temp = I915_READ(reg);
3207
    temp = I915_READ(reg);
2980
    temp &= ~FDI_RX_SYMBOL_LOCK;
3208
    temp &= ~FDI_RX_SYMBOL_LOCK;
2981
    temp &= ~FDI_RX_BIT_LOCK;
3209
    temp &= ~FDI_RX_BIT_LOCK;
2982
    I915_WRITE(reg, temp);
3210
    I915_WRITE(reg, temp);
2983
 
3211
 
2984
    POSTING_READ(reg);
3212
    POSTING_READ(reg);
2985
    udelay(150);
3213
    udelay(150);
2986
 
3214
 
2987
    /* enable CPU FDI TX and PCH FDI RX */
3215
    /* enable CPU FDI TX and PCH FDI RX */
2988
    reg = FDI_TX_CTL(pipe);
3216
    reg = FDI_TX_CTL(pipe);
2989
    temp = I915_READ(reg);
3217
    temp = I915_READ(reg);
2990
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
3218
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
2991
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
3219
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
2992
    temp &= ~FDI_LINK_TRAIN_NONE;
3220
    temp &= ~FDI_LINK_TRAIN_NONE;
2993
    temp |= FDI_LINK_TRAIN_PATTERN_1;
3221
    temp |= FDI_LINK_TRAIN_PATTERN_1;
2994
    temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3222
    temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2995
    /* SNB-B */
3223
    /* SNB-B */
2996
    temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
3224
    temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2997
    I915_WRITE(reg, temp | FDI_TX_ENABLE);
3225
    I915_WRITE(reg, temp | FDI_TX_ENABLE);
2998
 
3226
 
2999
	I915_WRITE(FDI_RX_MISC(pipe),
3227
	I915_WRITE(FDI_RX_MISC(pipe),
3000
		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
3228
		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
3001
 
3229
 
3002
    reg = FDI_RX_CTL(pipe);
3230
    reg = FDI_RX_CTL(pipe);
3003
    temp = I915_READ(reg);
3231
    temp = I915_READ(reg);
3004
    if (HAS_PCH_CPT(dev)) {
3232
    if (HAS_PCH_CPT(dev)) {
3005
        temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3233
        temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3006
        temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
3234
        temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
3007
    } else {
3235
    } else {
3008
        temp &= ~FDI_LINK_TRAIN_NONE;
3236
        temp &= ~FDI_LINK_TRAIN_NONE;
3009
        temp |= FDI_LINK_TRAIN_PATTERN_1;
3237
        temp |= FDI_LINK_TRAIN_PATTERN_1;
3010
    }
3238
    }
3011
    I915_WRITE(reg, temp | FDI_RX_ENABLE);
3239
    I915_WRITE(reg, temp | FDI_RX_ENABLE);
3012
 
3240
 
3013
    POSTING_READ(reg);
3241
    POSTING_READ(reg);
3014
    udelay(150);
3242
    udelay(150);
3015
 
3243
 
3016
	for (i = 0; i < 4; i++) {
3244
	for (i = 0; i < 4; i++) {
3017
        reg = FDI_TX_CTL(pipe);
3245
        reg = FDI_TX_CTL(pipe);
3018
        temp = I915_READ(reg);
3246
        temp = I915_READ(reg);
3019
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3247
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3020
        temp |= snb_b_fdi_train_param[i];
3248
        temp |= snb_b_fdi_train_param[i];
3021
        I915_WRITE(reg, temp);
3249
        I915_WRITE(reg, temp);
3022
 
3250
 
3023
        POSTING_READ(reg);
3251
        POSTING_READ(reg);
3024
        udelay(500);
3252
        udelay(500);
3025
 
3253
 
3026
		for (retry = 0; retry < 5; retry++) {
3254
		for (retry = 0; retry < 5; retry++) {
3027
        reg = FDI_RX_IIR(pipe);
3255
        reg = FDI_RX_IIR(pipe);
3028
        temp = I915_READ(reg);
3256
        temp = I915_READ(reg);
3029
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3257
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3030
        if (temp & FDI_RX_BIT_LOCK) {
3258
        if (temp & FDI_RX_BIT_LOCK) {
3031
            I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
3259
            I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
3032
            DRM_DEBUG_KMS("FDI train 1 done.\n");
3260
            DRM_DEBUG_KMS("FDI train 1 done.\n");
3033
            break;
3261
            break;
3034
        }
3262
        }
3035
			udelay(50);
3263
			udelay(50);
3036
		}
3264
		}
3037
		if (retry < 5)
3265
		if (retry < 5)
3038
			break;
3266
			break;
3039
    }
3267
    }
3040
    if (i == 4)
3268
    if (i == 4)
3041
        DRM_ERROR("FDI train 1 fail!\n");
3269
        DRM_ERROR("FDI train 1 fail!\n");
3042
 
3270
 
3043
    /* Train 2 */
3271
    /* Train 2 */
3044
    reg = FDI_TX_CTL(pipe);
3272
    reg = FDI_TX_CTL(pipe);
3045
    temp = I915_READ(reg);
3273
    temp = I915_READ(reg);
3046
    temp &= ~FDI_LINK_TRAIN_NONE;
3274
    temp &= ~FDI_LINK_TRAIN_NONE;
3047
    temp |= FDI_LINK_TRAIN_PATTERN_2;
3275
    temp |= FDI_LINK_TRAIN_PATTERN_2;
3048
    if (IS_GEN6(dev)) {
3276
    if (IS_GEN6(dev)) {
3049
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3277
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3050
        /* SNB-B */
3278
        /* SNB-B */
3051
        temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
3279
        temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
3052
    }
3280
    }
3053
    I915_WRITE(reg, temp);
3281
    I915_WRITE(reg, temp);
3054
 
3282
 
3055
    reg = FDI_RX_CTL(pipe);
3283
    reg = FDI_RX_CTL(pipe);
3056
    temp = I915_READ(reg);
3284
    temp = I915_READ(reg);
3057
    if (HAS_PCH_CPT(dev)) {
3285
    if (HAS_PCH_CPT(dev)) {
3058
        temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3286
        temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3059
        temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
3287
        temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
3060
    } else {
3288
    } else {
3061
        temp &= ~FDI_LINK_TRAIN_NONE;
3289
        temp &= ~FDI_LINK_TRAIN_NONE;
3062
        temp |= FDI_LINK_TRAIN_PATTERN_2;
3290
        temp |= FDI_LINK_TRAIN_PATTERN_2;
3063
    }
3291
    }
3064
    I915_WRITE(reg, temp);
3292
    I915_WRITE(reg, temp);
3065
 
3293
 
3066
    POSTING_READ(reg);
3294
    POSTING_READ(reg);
3067
    udelay(150);
3295
    udelay(150);
3068
 
3296
 
3069
	for (i = 0; i < 4; i++) {
3297
	for (i = 0; i < 4; i++) {
3070
        reg = FDI_TX_CTL(pipe);
3298
        reg = FDI_TX_CTL(pipe);
3071
        temp = I915_READ(reg);
3299
        temp = I915_READ(reg);
3072
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3300
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3073
        temp |= snb_b_fdi_train_param[i];
3301
        temp |= snb_b_fdi_train_param[i];
3074
        I915_WRITE(reg, temp);
3302
        I915_WRITE(reg, temp);
3075
 
3303
 
3076
        POSTING_READ(reg);
3304
        POSTING_READ(reg);
3077
        udelay(500);
3305
        udelay(500);
3078
 
3306
 
3079
		for (retry = 0; retry < 5; retry++) {
3307
		for (retry = 0; retry < 5; retry++) {
3080
        reg = FDI_RX_IIR(pipe);
3308
        reg = FDI_RX_IIR(pipe);
3081
        temp = I915_READ(reg);
3309
        temp = I915_READ(reg);
3082
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3310
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3083
        if (temp & FDI_RX_SYMBOL_LOCK) {
3311
        if (temp & FDI_RX_SYMBOL_LOCK) {
3084
            I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
3312
            I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
3085
            DRM_DEBUG_KMS("FDI train 2 done.\n");
3313
            DRM_DEBUG_KMS("FDI train 2 done.\n");
3086
            break;
3314
            break;
3087
        }
3315
        }
3088
			udelay(50);
3316
			udelay(50);
3089
		}
3317
		}
3090
		if (retry < 5)
3318
		if (retry < 5)
3091
			break;
3319
			break;
3092
    }
3320
    }
3093
    if (i == 4)
3321
    if (i == 4)
3094
        DRM_ERROR("FDI train 2 fail!\n");
3322
        DRM_ERROR("FDI train 2 fail!\n");
3095
 
3323
 
3096
    DRM_DEBUG_KMS("FDI train done.\n");
3324
    DRM_DEBUG_KMS("FDI train done.\n");
3097
}
3325
}
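
On SNB the training loop above additionally steps through the four vswing/pre-emphasis entries of snb_b_fdi_train_param, allowing up to five FDI_RX_IIR polls per setting before moving to the next one. A compact sketch of that nested retry structure; try_setting and the sample values stand in for the register writes and IIR polling shown above.

#include <stdio.h>
#include <stdbool.h>

/* Stand-ins for the four vswing/pre-emphasis values in snb_b_fdi_train_param. */
static const int train_param[4] = { 0, 1, 2, 3 };

/* Illustrative predicate: pretend only the third setting achieves lock. */
static bool try_setting(int setting, int attempt)
{
    (void)attempt;
    return setting == 2;
}

int main(void)
{
    for (int i = 0; i < 4; i++) {                 /* one pass per vswing setting */
        bool locked = false;
        for (int retry = 0; retry < 5; retry++) { /* up to five IIR polls        */
            if (try_setting(train_param[i], retry)) {
                locked = true;
                break;
            }
        }
        if (locked) {
            printf("trained with setting %d\n", train_param[i]);
            return 0;
        }
    }
    printf("training failed on all settings\n");
    return 1;
}
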
3098
 
3326
 
3099
/* Manual link training for Ivy Bridge A0 parts */
3327
/* Manual link training for Ivy Bridge A0 parts */
3100
static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
3328
static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
3101
{
3329
{
3102
    struct drm_device *dev = crtc->dev;
3330
    struct drm_device *dev = crtc->dev;
3103
    struct drm_i915_private *dev_priv = dev->dev_private;
3331
    struct drm_i915_private *dev_priv = dev->dev_private;
3104
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3332
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3105
    int pipe = intel_crtc->pipe;
3333
    int pipe = intel_crtc->pipe;
3106
	u32 reg, temp, i, j;
3334
	u32 reg, temp, i, j;
3107
 
3335
 
3108
    /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
3336
    /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
3109
       for train result */
3337
       for train result */
3110
    reg = FDI_RX_IMR(pipe);
3338
    reg = FDI_RX_IMR(pipe);
3111
    temp = I915_READ(reg);
3339
    temp = I915_READ(reg);
3112
    temp &= ~FDI_RX_SYMBOL_LOCK;
3340
    temp &= ~FDI_RX_SYMBOL_LOCK;
3113
    temp &= ~FDI_RX_BIT_LOCK;
3341
    temp &= ~FDI_RX_BIT_LOCK;
3114
    I915_WRITE(reg, temp);
3342
    I915_WRITE(reg, temp);
3115
 
3343
 
3116
    POSTING_READ(reg);
3344
    POSTING_READ(reg);
3117
    udelay(150);
3345
    udelay(150);
3118
 
3346
 
3119
	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
3347
	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
3120
		      I915_READ(FDI_RX_IIR(pipe)));
3348
		      I915_READ(FDI_RX_IIR(pipe)));
3121
 
3349
 
3122
	/* Try each vswing and preemphasis setting twice before moving on */
3350
	/* Try each vswing and preemphasis setting twice before moving on */
3123
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
3351
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
3124
		/* disable first in case we need to retry */
3352
		/* disable first in case we need to retry */
3125
		reg = FDI_TX_CTL(pipe);
3353
		reg = FDI_TX_CTL(pipe);
3126
		temp = I915_READ(reg);
3354
		temp = I915_READ(reg);
3127
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
3355
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
3128
		temp &= ~FDI_TX_ENABLE;
3356
		temp &= ~FDI_TX_ENABLE;
3129
		I915_WRITE(reg, temp);
3357
		I915_WRITE(reg, temp);
3130
 
3358
 
3131
		reg = FDI_RX_CTL(pipe);
3359
		reg = FDI_RX_CTL(pipe);
3132
		temp = I915_READ(reg);
3360
		temp = I915_READ(reg);
3133
		temp &= ~FDI_LINK_TRAIN_AUTO;
3361
		temp &= ~FDI_LINK_TRAIN_AUTO;
3134
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3362
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3135
		temp &= ~FDI_RX_ENABLE;
3363
		temp &= ~FDI_RX_ENABLE;
3136
		I915_WRITE(reg, temp);
3364
		I915_WRITE(reg, temp);
3137
 
3365
 
3138
    /* enable CPU FDI TX and PCH FDI RX */
3366
    /* enable CPU FDI TX and PCH FDI RX */
3139
    reg = FDI_TX_CTL(pipe);
3367
    reg = FDI_TX_CTL(pipe);
3140
    temp = I915_READ(reg);
3368
    temp = I915_READ(reg);
3141
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
3369
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
3142
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
3370
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
3143
    temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
3371
    temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
3144
    temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3372
    temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3145
		temp |= snb_b_fdi_train_param[j/2];
3373
		temp |= snb_b_fdi_train_param[j/2];
3146
	temp |= FDI_COMPOSITE_SYNC;
3374
	temp |= FDI_COMPOSITE_SYNC;
3147
    I915_WRITE(reg, temp | FDI_TX_ENABLE);
3375
    I915_WRITE(reg, temp | FDI_TX_ENABLE);
3148
 
3376
 
3149
	I915_WRITE(FDI_RX_MISC(pipe),
3377
	I915_WRITE(FDI_RX_MISC(pipe),
3150
		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
3378
		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
3151
 
3379
 
3152
    reg = FDI_RX_CTL(pipe);
3380
    reg = FDI_RX_CTL(pipe);
3153
    temp = I915_READ(reg);
3381
    temp = I915_READ(reg);
3154
    temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
3382
    temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
3155
	temp |= FDI_COMPOSITE_SYNC;
3383
	temp |= FDI_COMPOSITE_SYNC;
3156
    I915_WRITE(reg, temp | FDI_RX_ENABLE);
3384
    I915_WRITE(reg, temp | FDI_RX_ENABLE);
3157
 
3385
 
3158
    POSTING_READ(reg);
3386
    POSTING_READ(reg);
3159
		udelay(1); /* should be 0.5us */
3387
		udelay(1); /* should be 0.5us */
3160
 
3388
 
3161
	for (i = 0; i < 4; i++) {
3389
	for (i = 0; i < 4; i++) {
3162
        reg = FDI_RX_IIR(pipe);
3390
        reg = FDI_RX_IIR(pipe);
3163
        temp = I915_READ(reg);
3391
        temp = I915_READ(reg);
3164
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3392
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3165
 
3393
 
3166
        if (temp & FDI_RX_BIT_LOCK ||
3394
        if (temp & FDI_RX_BIT_LOCK ||
3167
            (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
3395
            (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
3168
            I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
3396
            I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
3169
				DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
3397
				DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
3170
					      i);
3398
					      i);
3171
            break;
3399
            break;
3172
        }
3400
        }
3173
			udelay(1); /* should be 0.5us */
3401
			udelay(1); /* should be 0.5us */
3174
		}
3402
		}
3175
		if (i == 4) {
3403
		if (i == 4) {
3176
			DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
3404
			DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
3177
			continue;
3405
			continue;
3178
    }
3406
    }
3179
 
3407
 
3180
    /* Train 2 */
3408
    /* Train 2 */
3181
    reg = FDI_TX_CTL(pipe);
3409
    reg = FDI_TX_CTL(pipe);
3182
    temp = I915_READ(reg);
3410
    temp = I915_READ(reg);
3183
    temp &= ~FDI_LINK_TRAIN_NONE_IVB;
3411
    temp &= ~FDI_LINK_TRAIN_NONE_IVB;
3184
    temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
3412
    temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
3185
    I915_WRITE(reg, temp);
3413
    I915_WRITE(reg, temp);
3186
 
3414
 
3187
    reg = FDI_RX_CTL(pipe);
3415
    reg = FDI_RX_CTL(pipe);
3188
    temp = I915_READ(reg);
3416
    temp = I915_READ(reg);
3189
    temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3417
    temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3190
    temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
3418
    temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
3191
    I915_WRITE(reg, temp);
3419
    I915_WRITE(reg, temp);
3192
 
3420
 
3193
    POSTING_READ(reg);
3421
    POSTING_READ(reg);
3194
		udelay(2); /* should be 1.5us */
3422
		udelay(2); /* should be 1.5us */
3195
 
3423
 
3196
	for (i = 0; i < 4; i++) {
3424
	for (i = 0; i < 4; i++) {
3197
        reg = FDI_RX_IIR(pipe);
3425
        reg = FDI_RX_IIR(pipe);
3198
        temp = I915_READ(reg);
3426
        temp = I915_READ(reg);
3199
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3427
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3200
 
3428
 
3201
			if (temp & FDI_RX_SYMBOL_LOCK ||
3429
			if (temp & FDI_RX_SYMBOL_LOCK ||
3202
			    (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
3430
			    (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
3203
            I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
3431
            I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
3204
				DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
3432
				DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
3205
					      i);
3433
					      i);
3206
				goto train_done;
3434
				goto train_done;
3207
        }
3435
        }
3208
			udelay(2); /* should be 1.5us */
3436
			udelay(2); /* should be 1.5us */
3209
    }
3437
    }
3210
    if (i == 4)
3438
    if (i == 4)
3211
			DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
3439
			DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
3212
	}
3440
	}
3213
 
3441
 
3214
train_done:
3442
train_done:
3215
    DRM_DEBUG_KMS("FDI train done.\n");
3443
    DRM_DEBUG_KMS("FDI train done.\n");
3216
}
3444
}
3217
 
3445
 
3218
static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
3446
static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
3219
{
3447
{
3220
	struct drm_device *dev = intel_crtc->base.dev;
3448
	struct drm_device *dev = intel_crtc->base.dev;
3221
	struct drm_i915_private *dev_priv = dev->dev_private;
3449
	struct drm_i915_private *dev_priv = dev->dev_private;
3222
	int pipe = intel_crtc->pipe;
3450
	int pipe = intel_crtc->pipe;
3223
	u32 reg, temp;
3451
	u32 reg, temp;
3224
 
3452
 
3225
 
3453
 
3226
	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
3454
	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
3227
	reg = FDI_RX_CTL(pipe);
3455
	reg = FDI_RX_CTL(pipe);
3228
	temp = I915_READ(reg);
3456
	temp = I915_READ(reg);
3229
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
3457
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
3230
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
3458
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
3231
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
3459
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
3232
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
3460
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
3233
 
3461
 
3234
	POSTING_READ(reg);
3462
	POSTING_READ(reg);
3235
	udelay(200);
3463
	udelay(200);
3236
 
3464
 
3237
	/* Switch from Rawclk to PCDclk */
3465
	/* Switch from Rawclk to PCDclk */
3238
	temp = I915_READ(reg);
3466
	temp = I915_READ(reg);
3239
	I915_WRITE(reg, temp | FDI_PCDCLK);
3467
	I915_WRITE(reg, temp | FDI_PCDCLK);
3240
 
3468
 
3241
	POSTING_READ(reg);
3469
	POSTING_READ(reg);
3242
	udelay(200);
3470
	udelay(200);
3243
 
3471
 
3244
	/* Enable CPU FDI TX PLL, always on for Ironlake */
3472
	/* Enable CPU FDI TX PLL, always on for Ironlake */
3245
	reg = FDI_TX_CTL(pipe);
3473
	reg = FDI_TX_CTL(pipe);
3246
	temp = I915_READ(reg);
3474
	temp = I915_READ(reg);
3247
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
3475
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
3248
		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
3476
		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
3249
 
3477
 
3250
		POSTING_READ(reg);
3478
		POSTING_READ(reg);
3251
		udelay(100);
3479
		udelay(100);
3252
	}
3480
	}
3253
}
3481
}
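
Nearly every register touch in this file follows the same read-modify-write pattern, followed by a posting read of the register and a fixed delay so the write has landed before the next step. A minimal model of that sequence over a fake MMIO word; the helpers and the PLL_ENABLE bit below are illustrative, not the driver's I915_READ/I915_WRITE or real register bits.

#include <stdio.h>
#include <stdint.h>

static uint32_t fake_mmio;          /* stand-in for one display register */

static uint32_t reg_read(void)        { return fake_mmio; }
static void     reg_write(uint32_t v) { fake_mmio = v; }
static void     delay_us(unsigned us) { (void)us; /* placeholder udelay */ }

#define PLL_ENABLE (1u << 13)       /* placeholder bit, not a real define */

int main(void)
{
    /* Read-modify-write: preserve existing bits, set the one we need. */
    uint32_t temp = reg_read();
    reg_write(temp | PLL_ENABLE);

    /* Posting read flushes the write before the delay starts counting,
     * mirroring POSTING_READ(reg); udelay(200); above. */
    (void)reg_read();
    delay_us(200);

    printf("register now 0x%08x\n", reg_read());
    return 0;
}
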
3254
 
3482
 
3255
static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
3483
static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
3256
{
3484
{
3257
	struct drm_device *dev = intel_crtc->base.dev;
3485
	struct drm_device *dev = intel_crtc->base.dev;
3258
	struct drm_i915_private *dev_priv = dev->dev_private;
3486
	struct drm_i915_private *dev_priv = dev->dev_private;
3259
	int pipe = intel_crtc->pipe;
3487
	int pipe = intel_crtc->pipe;
3260
	u32 reg, temp;
3488
	u32 reg, temp;
3261
 
3489
 
3262
	/* Switch from PCDclk to Rawclk */
3490
	/* Switch from PCDclk to Rawclk */
3263
	reg = FDI_RX_CTL(pipe);
3491
	reg = FDI_RX_CTL(pipe);
3264
	temp = I915_READ(reg);
3492
	temp = I915_READ(reg);
3265
	I915_WRITE(reg, temp & ~FDI_PCDCLK);
3493
	I915_WRITE(reg, temp & ~FDI_PCDCLK);
3266
 
3494
 
3267
	/* Disable CPU FDI TX PLL */
3495
	/* Disable CPU FDI TX PLL */
3268
	reg = FDI_TX_CTL(pipe);
3496
	reg = FDI_TX_CTL(pipe);
3269
	temp = I915_READ(reg);
3497
	temp = I915_READ(reg);
3270
	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
3498
	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
3271
 
3499
 
3272
	POSTING_READ(reg);
3500
	POSTING_READ(reg);
3273
	udelay(100);
3501
	udelay(100);
3274
 
3502
 
3275
	reg = FDI_RX_CTL(pipe);
3503
	reg = FDI_RX_CTL(pipe);
3276
	temp = I915_READ(reg);
3504
	temp = I915_READ(reg);
3277
	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
3505
	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
3278
 
3506
 
3279
	/* Wait for the clocks to turn off. */
3507
	/* Wait for the clocks to turn off. */
3280
	POSTING_READ(reg);
3508
	POSTING_READ(reg);
3281
	udelay(100);
3509
	udelay(100);
3282
}
3510
}
3283
 
3511
 
3284
static void ironlake_fdi_disable(struct drm_crtc *crtc)
3512
static void ironlake_fdi_disable(struct drm_crtc *crtc)
3285
{
3513
{
3286
	struct drm_device *dev = crtc->dev;
3514
	struct drm_device *dev = crtc->dev;
3287
	struct drm_i915_private *dev_priv = dev->dev_private;
3515
	struct drm_i915_private *dev_priv = dev->dev_private;
3288
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3516
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3289
	int pipe = intel_crtc->pipe;
3517
	int pipe = intel_crtc->pipe;
3290
	u32 reg, temp;
3518
	u32 reg, temp;
3291
 
3519
 
3292
	/* disable CPU FDI tx and PCH FDI rx */
3520
	/* disable CPU FDI tx and PCH FDI rx */
3293
	reg = FDI_TX_CTL(pipe);
3521
	reg = FDI_TX_CTL(pipe);
3294
	temp = I915_READ(reg);
3522
	temp = I915_READ(reg);
3295
	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
3523
	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
3296
	POSTING_READ(reg);
3524
	POSTING_READ(reg);
3297
 
3525
 
3298
	reg = FDI_RX_CTL(pipe);
3526
	reg = FDI_RX_CTL(pipe);
3299
	temp = I915_READ(reg);
3527
	temp = I915_READ(reg);
3300
	temp &= ~(0x7 << 16);
3528
	temp &= ~(0x7 << 16);
3301
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
3529
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
3302
	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
3530
	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
3303
 
3531
 
3304
	POSTING_READ(reg);
3532
	POSTING_READ(reg);
3305
	udelay(100);
3533
	udelay(100);
3306
 
3534
 
3307
	/* Ironlake workaround, disable clock pointer after downing FDI */
3535
	/* Ironlake workaround, disable clock pointer after downing FDI */
3308
	if (HAS_PCH_IBX(dev))
3536
	if (HAS_PCH_IBX(dev))
3309
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
3537
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
3310
 
3538
 
3311
	/* still set train pattern 1 */
3539
	/* still set train pattern 1 */
3312
	reg = FDI_TX_CTL(pipe);
3540
	reg = FDI_TX_CTL(pipe);
3313
	temp = I915_READ(reg);
3541
	temp = I915_READ(reg);
3314
	temp &= ~FDI_LINK_TRAIN_NONE;
3542
	temp &= ~FDI_LINK_TRAIN_NONE;
3315
	temp |= FDI_LINK_TRAIN_PATTERN_1;
3543
	temp |= FDI_LINK_TRAIN_PATTERN_1;
3316
	I915_WRITE(reg, temp);
3544
	I915_WRITE(reg, temp);
3317
 
3545
 
3318
	reg = FDI_RX_CTL(pipe);
3546
	reg = FDI_RX_CTL(pipe);
3319
	temp = I915_READ(reg);
3547
	temp = I915_READ(reg);
3320
	if (HAS_PCH_CPT(dev)) {
3548
	if (HAS_PCH_CPT(dev)) {
3321
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3549
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3322
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
3550
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
3323
	} else {
3551
	} else {
3324
		temp &= ~FDI_LINK_TRAIN_NONE;
3552
		temp &= ~FDI_LINK_TRAIN_NONE;
3325
		temp |= FDI_LINK_TRAIN_PATTERN_1;
3553
		temp |= FDI_LINK_TRAIN_PATTERN_1;
3326
	}
3554
	}
3327
	/* BPC in FDI rx is consistent with that in PIPECONF */
3555
	/* BPC in FDI rx is consistent with that in PIPECONF */
3328
	temp &= ~(0x07 << 16);
3556
	temp &= ~(0x07 << 16);
3329
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
3557
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
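	/*
	 * Note: PIPECONF carries the pipe bpc code in bits 7:5, so shifting
	 * the masked value left by 11 lands it in bits 18:16 of FDI_RX_CTL,
	 * the field cleared by the ~(0x07 << 16) above.
	 */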
3330
	I915_WRITE(reg, temp);
3558
	I915_WRITE(reg, temp);
3331
 
3559
 
3332
	POSTING_READ(reg);
3560
	POSTING_READ(reg);
3333
	udelay(100);
3561
	udelay(100);
3334
}
3562
}
3335
 
3563
 
3336
bool intel_has_pending_fb_unpin(struct drm_device *dev)
3564
bool intel_has_pending_fb_unpin(struct drm_device *dev)
3337
{
3565
{
3338
	struct intel_crtc *crtc;
3566
	struct intel_crtc *crtc;
3339
 
3567
 
3340
	/* Note that we don't need to be called with mode_config.lock here
3568
	/* Note that we don't need to be called with mode_config.lock here
3341
	 * as our list of CRTC objects is static for the lifetime of the
3569
	 * as our list of CRTC objects is static for the lifetime of the
3342
	 * device and so cannot disappear as we iterate. Similarly, we can
3570
	 * device and so cannot disappear as we iterate. Similarly, we can
3343
	 * happily treat the predicates as racy, atomic checks as userspace
3571
	 * happily treat the predicates as racy, atomic checks as userspace
3344
	 * cannot claim and pin a new fb without at least acquiring the
3572
	 * cannot claim and pin a new fb without at least acquiring the
3345
	 * struct_mutex and so serialising with us.
3573
	 * struct_mutex and so serialising with us.
3346
	 */
3574
	 */
3347
	for_each_intel_crtc(dev, crtc) {
3575
	for_each_intel_crtc(dev, crtc) {
3348
		if (atomic_read(&crtc->unpin_work_count) == 0)
3576
		if (atomic_read(&crtc->unpin_work_count) == 0)
3349
			continue;
3577
			continue;
3350
 
3578
 
3351
		if (crtc->unpin_work)
3579
		if (crtc->unpin_work)
3352
			intel_wait_for_vblank(dev, crtc->pipe);
3580
			intel_wait_for_vblank(dev, crtc->pipe);
3353
 
3581
 
3354
		return true;
3582
		return true;
3355
	}
3583
	}
3356
 
3584
 
3357
	return false;
3585
	return false;
3358
}
3586
}
3359
 
3587
 
3360
#if 0
3588
#if 0
3361
void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
3589
void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
3362
{
3590
{
3363
	struct drm_device *dev = crtc->dev;
3591
	struct drm_device *dev = crtc->dev;
3364
	struct drm_i915_private *dev_priv = dev->dev_private;
3592
	struct drm_i915_private *dev_priv = dev->dev_private;
3365
 
-
 
3366
	if (crtc->primary->fb == NULL)
-
 
3367
		return;
-
 
3368
 
3593
 
3369
	WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));
-
 
3370
 
3594
	WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));
3371
	WARN_ON(wait_event_timeout(dev_priv->pending_flip_queue,
3595
	if (WARN_ON(wait_event_timeout(dev_priv->pending_flip_queue,
3372
				   !intel_crtc_has_pending_flip(crtc),
3596
				       !intel_crtc_has_pending_flip(crtc),
-
 
3597
				       60*HZ) == 0)) {
-
 
3598
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
 
3599
 
-
 
3600
		spin_lock_irq(&dev->event_lock);
-
 
3601
		if (intel_crtc->unpin_work) {
-
 
3602
			WARN_ONCE(1, "Removing stuck page flip\n");
-
 
3603
			page_flip_completed(intel_crtc);
-
 
3604
		}
-
 
3605
		spin_unlock_irq(&dev->event_lock);
-
 
3606
	}
3373
				   60*HZ) == 0);
3607
 
3374
 
3608
	if (crtc->primary->fb) {
3375
	mutex_lock(&dev->struct_mutex);
3609
	mutex_lock(&dev->struct_mutex);
3376
	intel_finish_fb(crtc->primary->fb);
3610
	intel_finish_fb(crtc->primary->fb);
3377
	mutex_unlock(&dev->struct_mutex);
3611
	mutex_unlock(&dev->struct_mutex);
3378
}
3612
	}
-
 
3613
}
3379
#endif
3614
#endif
3380
 
3615
 
3381
/* Program iCLKIP clock to the desired frequency */
3616
/* Program iCLKIP clock to the desired frequency */
3382
static void lpt_program_iclkip(struct drm_crtc *crtc)
3617
static void lpt_program_iclkip(struct drm_crtc *crtc)
3383
{
3618
{
3384
	struct drm_device *dev = crtc->dev;
3619
	struct drm_device *dev = crtc->dev;
3385
	struct drm_i915_private *dev_priv = dev->dev_private;
3620
	struct drm_i915_private *dev_priv = dev->dev_private;
3386
	int clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
3621
	int clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
3387
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
3622
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
3388
	u32 temp;
3623
	u32 temp;
3389
 
3624
 
3390
	mutex_lock(&dev_priv->dpio_lock);
3625
	mutex_lock(&dev_priv->dpio_lock);
3391
 
3626
 
3392
	/* It is necessary to ungate the pixclk gate prior to programming
3627
	/* It is necessary to ungate the pixclk gate prior to programming
3393
	 * the divisors, and gate it back when it is done.
3628
	 * the divisors, and gate it back when it is done.
3394
	 */
3629
	 */
3395
	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
3630
	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
3396
 
3631
 
3397
	/* Disable SSCCTL */
3632
	/* Disable SSCCTL */
3398
	intel_sbi_write(dev_priv, SBI_SSCCTL6,
3633
	intel_sbi_write(dev_priv, SBI_SSCCTL6,
3399
			intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK) |
3634
			intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK) |
3400
				SBI_SSCCTL_DISABLE,
3635
				SBI_SSCCTL_DISABLE,
3401
			SBI_ICLK);
3636
			SBI_ICLK);
3402
 
3637
 
3403
	/* 20MHz is a corner case which is out of range for the 7-bit divisor */
3638
	/* 20MHz is a corner case which is out of range for the 7-bit divisor */
3404
	if (clock == 20000) {
3639
	if (clock == 20000) {
3405
		auxdiv = 1;
3640
		auxdiv = 1;
3406
		divsel = 0x41;
3641
		divsel = 0x41;
3407
		phaseinc = 0x20;
3642
		phaseinc = 0x20;
3408
	} else {
3643
	} else {
3409
		/* The iCLK virtual clock root frequency is in MHz,
3644
		/* The iCLK virtual clock root frequency is in MHz,
3410
		 * but the adjusted_mode->crtc_clock is in KHz. To get the
3645
		 * but the adjusted_mode->crtc_clock is in KHz. To get the
3411
		 * divisors, it is necessary to divide one by another, so we
3646
		 * divisors, it is necessary to divide one by another, so we
3412
		 * convert the virtual clock precision to KHz here for higher
3647
		 * convert the virtual clock precision to KHz here for higher
3413
		 * precision.
3648
		 * precision.
3414
		 */
3649
		 */
3415
		u32 iclk_virtual_root_freq = 172800 * 1000;
3650
		u32 iclk_virtual_root_freq = 172800 * 1000;
3416
		u32 iclk_pi_range = 64;
3651
		u32 iclk_pi_range = 64;
3417
		u32 desired_divisor, msb_divisor_value, pi_value;
3652
		u32 desired_divisor, msb_divisor_value, pi_value;
3418
 
3653
 
3419
		desired_divisor = (iclk_virtual_root_freq / clock);
3654
		desired_divisor = (iclk_virtual_root_freq / clock);
3420
		msb_divisor_value = desired_divisor / iclk_pi_range;
3655
		msb_divisor_value = desired_divisor / iclk_pi_range;
3421
		pi_value = desired_divisor % iclk_pi_range;
3656
		pi_value = desired_divisor % iclk_pi_range;
3422
 
3657
 
3423
		auxdiv = 0;
3658
		auxdiv = 0;
3424
		divsel = msb_divisor_value - 2;
3659
		divsel = msb_divisor_value - 2;
3425
		phaseinc = pi_value;
3660
		phaseinc = pi_value;
3426
	}
3661
	}
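	/*
	 * Worked example (illustrative only; 108 MHz is an arbitrary pixel
	 * clock): for clock = 108000 kHz the computation above gives
	 *   desired_divisor   = 172800000 / 108000 = 1600
	 *   msb_divisor_value = 1600 / 64 = 25  ->  divsel   = 25 - 2 = 23
	 *   pi_value          = 1600 % 64 = 0   ->  phaseinc = 0
	 * with auxdiv = 0, which passes the range checks below.
	 */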
3427
 
3662
 
3428
	/* This should not happen with any sane values */
3663
	/* This should not happen with any sane values */
3429
	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
3664
	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
3430
		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
3665
		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
3431
	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
3666
	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
3432
		~SBI_SSCDIVINTPHASE_INCVAL_MASK);
3667
		~SBI_SSCDIVINTPHASE_INCVAL_MASK);
3433
 
3668
 
3434
	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
3669
	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
3435
			clock,
3670
			clock,
3436
			auxdiv,
3671
			auxdiv,
3437
			divsel,
3672
			divsel,
3438
			phasedir,
3673
			phasedir,
3439
			phaseinc);
3674
			phaseinc);
3440
 
3675
 
3441
	/* Program SSCDIVINTPHASE6 */
3676
	/* Program SSCDIVINTPHASE6 */
3442
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
3677
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
3443
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
3678
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
3444
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
3679
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
3445
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
3680
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
3446
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
3681
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
3447
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
3682
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
3448
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
3683
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
3449
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
3684
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
3450
 
3685
 
3451
	/* Program SSCAUXDIV */
3686
	/* Program SSCAUXDIV */
3452
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
3687
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
3453
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
3688
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
3454
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
3689
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
3455
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
3690
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
3456
 
3691
 
3457
	/* Enable modulator and associated divider */
3692
	/* Enable modulator and associated divider */
3458
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
3693
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
3459
	temp &= ~SBI_SSCCTL_DISABLE;
3694
	temp &= ~SBI_SSCCTL_DISABLE;
3460
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
3695
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
3461
 
3696
 
3462
	/* Wait for initialization time */
3697
	/* Wait for initialization time */
3463
	udelay(24);
3698
	udelay(24);
3464
 
3699
 
3465
	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
3700
	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
3466
 
3701
 
3467
	mutex_unlock(&dev_priv->dpio_lock);
3702
	mutex_unlock(&dev_priv->dpio_lock);
3468
}
3703
}
3469
 
3704
 
3470
static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
3705
static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
3471
						enum pipe pch_transcoder)
3706
						enum pipe pch_transcoder)
3472
{
3707
{
3473
	struct drm_device *dev = crtc->base.dev;
3708
	struct drm_device *dev = crtc->base.dev;
3474
	struct drm_i915_private *dev_priv = dev->dev_private;
3709
	struct drm_i915_private *dev_priv = dev->dev_private;
3475
	enum transcoder cpu_transcoder = crtc->config.cpu_transcoder;
3710
	enum transcoder cpu_transcoder = crtc->config.cpu_transcoder;
3476
 
3711
 
3477
	I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
3712
	I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
3478
		   I915_READ(HTOTAL(cpu_transcoder)));
3713
		   I915_READ(HTOTAL(cpu_transcoder)));
3479
	I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
3714
	I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
3480
		   I915_READ(HBLANK(cpu_transcoder)));
3715
		   I915_READ(HBLANK(cpu_transcoder)));
3481
	I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
3716
	I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
3482
		   I915_READ(HSYNC(cpu_transcoder)));
3717
		   I915_READ(HSYNC(cpu_transcoder)));
3483
 
3718
 
3484
	I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
3719
	I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
3485
		   I915_READ(VTOTAL(cpu_transcoder)));
3720
		   I915_READ(VTOTAL(cpu_transcoder)));
3486
	I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
3721
	I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
3487
		   I915_READ(VBLANK(cpu_transcoder)));
3722
		   I915_READ(VBLANK(cpu_transcoder)));
3488
	I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
3723
	I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
3489
		   I915_READ(VSYNC(cpu_transcoder)));
3724
		   I915_READ(VSYNC(cpu_transcoder)));
3490
	I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
3725
	I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
3491
		   I915_READ(VSYNCSHIFT(cpu_transcoder)));
3726
		   I915_READ(VSYNCSHIFT(cpu_transcoder)));
3492
}
3727
}
3493
 
3728
 
3494
static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev)
3729
static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev)
3495
{
3730
{
3496
	struct drm_i915_private *dev_priv = dev->dev_private;
3731
	struct drm_i915_private *dev_priv = dev->dev_private;
3497
	uint32_t temp;
3732
	uint32_t temp;
3498
 
3733
 
3499
	temp = I915_READ(SOUTH_CHICKEN1);
3734
	temp = I915_READ(SOUTH_CHICKEN1);
3500
	if (temp & FDI_BC_BIFURCATION_SELECT)
3735
	if (temp & FDI_BC_BIFURCATION_SELECT)
3501
		return;
3736
		return;
3502
 
3737
 
3503
	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
3738
	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
3504
	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
3739
	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
3505
 
3740
 
3506
	temp |= FDI_BC_BIFURCATION_SELECT;
3741
	temp |= FDI_BC_BIFURCATION_SELECT;
3507
	DRM_DEBUG_KMS("enabling fdi C rx\n");
3742
	DRM_DEBUG_KMS("enabling fdi C rx\n");
3508
	I915_WRITE(SOUTH_CHICKEN1, temp);
3743
	I915_WRITE(SOUTH_CHICKEN1, temp);
3509
	POSTING_READ(SOUTH_CHICKEN1);
3744
	POSTING_READ(SOUTH_CHICKEN1);
3510
}
3745
}
3511
 
3746
 
3512
static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
3747
static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
3513
{
3748
{
3514
	struct drm_device *dev = intel_crtc->base.dev;
3749
	struct drm_device *dev = intel_crtc->base.dev;
3515
	struct drm_i915_private *dev_priv = dev->dev_private;
3750
	struct drm_i915_private *dev_priv = dev->dev_private;
3516
 
3751
 
3517
	switch (intel_crtc->pipe) {
3752
	switch (intel_crtc->pipe) {
3518
	case PIPE_A:
3753
	case PIPE_A:
3519
		break;
3754
		break;
3520
	case PIPE_B:
3755
	case PIPE_B:
3521
		if (intel_crtc->config.fdi_lanes > 2)
3756
		if (intel_crtc->config.fdi_lanes > 2)
3522
			WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT);
3757
			WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT);
3523
		else
3758
		else
3524
			cpt_enable_fdi_bc_bifurcation(dev);
3759
			cpt_enable_fdi_bc_bifurcation(dev);
3525
 
3760
 
3526
		break;
3761
		break;
3527
	case PIPE_C:
3762
	case PIPE_C:
3528
		cpt_enable_fdi_bc_bifurcation(dev);
3763
		cpt_enable_fdi_bc_bifurcation(dev);
3529
 
3764
 
3530
		break;
3765
		break;
3531
	default:
3766
	default:
3532
		BUG();
3767
		BUG();
3533
	}
3768
	}
3534
}
3769
}
3535
 
3770
 
3536
/*
3771
/*
3537
 * Enable PCH resources required for PCH ports:
3772
 * Enable PCH resources required for PCH ports:
3538
 *   - PCH PLLs
3773
 *   - PCH PLLs
3539
 *   - FDI training & RX/TX
3774
 *   - FDI training & RX/TX
3540
 *   - update transcoder timings
3775
 *   - update transcoder timings
3541
 *   - DP transcoding bits
3776
 *   - DP transcoding bits
3542
 *   - transcoder
3777
 *   - transcoder
3543
 */
3778
 */
3544
static void ironlake_pch_enable(struct drm_crtc *crtc)
3779
static void ironlake_pch_enable(struct drm_crtc *crtc)
3545
{
3780
{
3546
	struct drm_device *dev = crtc->dev;
3781
	struct drm_device *dev = crtc->dev;
3547
	struct drm_i915_private *dev_priv = dev->dev_private;
3782
	struct drm_i915_private *dev_priv = dev->dev_private;
3548
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3783
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3549
	int pipe = intel_crtc->pipe;
3784
	int pipe = intel_crtc->pipe;
3550
	u32 reg, temp;
3785
	u32 reg, temp;
3551
 
3786
 
3552
	assert_pch_transcoder_disabled(dev_priv, pipe);
3787
	assert_pch_transcoder_disabled(dev_priv, pipe);
3553
 
3788
 
3554
	if (IS_IVYBRIDGE(dev))
3789
	if (IS_IVYBRIDGE(dev))
3555
		ivybridge_update_fdi_bc_bifurcation(intel_crtc);
3790
		ivybridge_update_fdi_bc_bifurcation(intel_crtc);
3556
 
3791
 
3557
	/* Write the TU size bits before fdi link training, so that error
3792
	/* Write the TU size bits before fdi link training, so that error
3558
	 * detection works. */
3793
	 * detection works. */
3559
	I915_WRITE(FDI_RX_TUSIZE1(pipe),
3794
	I915_WRITE(FDI_RX_TUSIZE1(pipe),
3560
		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
3795
		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
3561
 
3796
 
3562
	/* For PCH output, training FDI link */
3797
	/* For PCH output, training FDI link */
3563
	dev_priv->display.fdi_link_train(crtc);
3798
	dev_priv->display.fdi_link_train(crtc);
3564
 
3799
 
3565
	/* We need to program the right clock selection before writing the pixel
3800
	/* We need to program the right clock selection before writing the pixel
3566
	 * multiplier into the DPLL. */
3801
	 * multiplier into the DPLL. */
3567
	if (HAS_PCH_CPT(dev)) {
3802
	if (HAS_PCH_CPT(dev)) {
3568
		u32 sel;
3803
		u32 sel;
3569
 
3804
 
3570
		temp = I915_READ(PCH_DPLL_SEL);
3805
		temp = I915_READ(PCH_DPLL_SEL);
3571
		temp |= TRANS_DPLL_ENABLE(pipe);
3806
		temp |= TRANS_DPLL_ENABLE(pipe);
3572
		sel = TRANS_DPLLB_SEL(pipe);
3807
		sel = TRANS_DPLLB_SEL(pipe);
3573
		if (intel_crtc->config.shared_dpll == DPLL_ID_PCH_PLL_B)
3808
		if (intel_crtc->config.shared_dpll == DPLL_ID_PCH_PLL_B)
3574
			temp |= sel;
3809
			temp |= sel;
3575
		else
3810
		else
3576
			temp &= ~sel;
3811
			temp &= ~sel;
3577
		I915_WRITE(PCH_DPLL_SEL, temp);
3812
		I915_WRITE(PCH_DPLL_SEL, temp);
3578
	}
3813
	}
3579
 
3814
 
3580
	/* XXX: PCH PLLs can be enabled any time before we enable the PCH
3815
	/* XXX: PCH PLLs can be enabled any time before we enable the PCH
3581
	 * transcoder, and we actually should do this to not upset any PCH
3816
	 * transcoder, and we actually should do this to not upset any PCH
3582
	 * transcoder that already uses the clock when we share it.
3817
	 * transcoder that already uses the clock when we share it.
3583
	 *
3818
	 *
3584
	 * Note that enable_shared_dpll tries to do the right thing, but
3819
	 * Note that enable_shared_dpll tries to do the right thing, but
3585
	 * get_shared_dpll unconditionally resets the pll - we need that to have
3820
	 * get_shared_dpll unconditionally resets the pll - we need that to have
3586
	 * the right LVDS enable sequence. */
3821
	 * the right LVDS enable sequence. */
3587
	intel_enable_shared_dpll(intel_crtc);
3822
	intel_enable_shared_dpll(intel_crtc);
3588
 
3823
 
3589
	/* set transcoder timing, panel must allow it */
3824
	/* set transcoder timing, panel must allow it */
3590
	assert_panel_unlocked(dev_priv, pipe);
3825
	assert_panel_unlocked(dev_priv, pipe);
3591
	ironlake_pch_transcoder_set_timings(intel_crtc, pipe);
3826
	ironlake_pch_transcoder_set_timings(intel_crtc, pipe);
3592
 
3827
 
3593
	intel_fdi_normal_train(crtc);
3828
	intel_fdi_normal_train(crtc);
3594
 
3829
 
3595
	/* For PCH DP, enable TRANS_DP_CTL */
3830
	/* For PCH DP, enable TRANS_DP_CTL */
3596
	if (HAS_PCH_CPT(dev) &&
-
 
3597
	    (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
-
 
3598
	     intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
3831
	if (HAS_PCH_CPT(dev) && intel_crtc->config.has_dp_encoder) {
3599
		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
3832
		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
3600
		reg = TRANS_DP_CTL(pipe);
3833
		reg = TRANS_DP_CTL(pipe);
3601
		temp = I915_READ(reg);
3834
		temp = I915_READ(reg);
3602
		temp &= ~(TRANS_DP_PORT_SEL_MASK |
3835
		temp &= ~(TRANS_DP_PORT_SEL_MASK |
3603
			  TRANS_DP_SYNC_MASK |
3836
			  TRANS_DP_SYNC_MASK |
3604
			  TRANS_DP_BPC_MASK);
3837
			  TRANS_DP_BPC_MASK);
3605
		temp |= (TRANS_DP_OUTPUT_ENABLE |
3838
		temp |= (TRANS_DP_OUTPUT_ENABLE |
3606
			 TRANS_DP_ENH_FRAMING);
3839
			 TRANS_DP_ENH_FRAMING);
3607
		temp |= bpc << 9; /* same format but at 11:9 */
3840
		temp |= bpc << 9; /* same format but at 11:9 */
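		/*
		 * Note: bpc is the raw 3-bit PIPECONF code (already shifted
		 * down by 5), so placing it at bits 11:9 reuses the same
		 * encoding inside the TRANS_DP_BPC_MASK field cleared above.
		 */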
3608
 
3841
 
3609
		if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
3842
		if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
3610
			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
3843
			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
3611
		if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
3844
		if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
3612
			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
3845
			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
3613
 
3846
 
3614
		switch (intel_trans_dp_port_sel(crtc)) {
3847
		switch (intel_trans_dp_port_sel(crtc)) {
3615
		case PCH_DP_B:
3848
		case PCH_DP_B:
3616
			temp |= TRANS_DP_PORT_SEL_B;
3849
			temp |= TRANS_DP_PORT_SEL_B;
3617
			break;
3850
			break;
3618
		case PCH_DP_C:
3851
		case PCH_DP_C:
3619
			temp |= TRANS_DP_PORT_SEL_C;
3852
			temp |= TRANS_DP_PORT_SEL_C;
3620
			break;
3853
			break;
3621
		case PCH_DP_D:
3854
		case PCH_DP_D:
3622
			temp |= TRANS_DP_PORT_SEL_D;
3855
			temp |= TRANS_DP_PORT_SEL_D;
3623
			break;
3856
			break;
3624
		default:
3857
		default:
3625
			BUG();
3858
			BUG();
3626
		}
3859
		}
3627
 
3860
 
3628
		I915_WRITE(reg, temp);
3861
		I915_WRITE(reg, temp);
3629
	}
3862
	}
3630
 
3863
 
3631
	ironlake_enable_pch_transcoder(dev_priv, pipe);
3864
	ironlake_enable_pch_transcoder(dev_priv, pipe);
3632
}
3865
}
3633
 
3866
 
3634
static void lpt_pch_enable(struct drm_crtc *crtc)
3867
static void lpt_pch_enable(struct drm_crtc *crtc)
3635
{
3868
{
3636
	struct drm_device *dev = crtc->dev;
3869
	struct drm_device *dev = crtc->dev;
3637
	struct drm_i915_private *dev_priv = dev->dev_private;
3870
	struct drm_i915_private *dev_priv = dev->dev_private;
3638
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3871
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3639
	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
3872
	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
3640
 
3873
 
3641
	assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A);
3874
	assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A);
3642
 
3875
 
3643
	lpt_program_iclkip(crtc);
3876
	lpt_program_iclkip(crtc);
3644
 
3877
 
3645
	/* Set transcoder timing. */
3878
	/* Set transcoder timing. */
3646
	ironlake_pch_transcoder_set_timings(intel_crtc, PIPE_A);
3879
	ironlake_pch_transcoder_set_timings(intel_crtc, PIPE_A);
3647
 
3880
 
3648
	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
3881
	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
3649
}
3882
}
3650
 
3883
 
3651
void intel_put_shared_dpll(struct intel_crtc *crtc)
3884
void intel_put_shared_dpll(struct intel_crtc *crtc)
3652
{
3885
{
3653
	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
3886
	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
3654
 
3887
 
3655
	if (pll == NULL)
3888
	if (pll == NULL)
3656
		return;
3889
		return;
3657
 
3890
 
3658
	if (pll->refcount == 0) {
3891
	if (!(pll->config.crtc_mask & (1 << crtc->pipe))) {
3659
		WARN(1, "bad %s refcount\n", pll->name);
3892
		WARN(1, "bad %s crtc mask\n", pll->name);
3660
		return;
3893
		return;
3661
	}
3894
	}
-
 
3895
 
3662
 
3896
	pll->config.crtc_mask &= ~(1 << crtc->pipe);
3663
	if (--pll->refcount == 0) {
3897
	if (pll->config.crtc_mask == 0) {
3664
		WARN_ON(pll->on);
3898
		WARN_ON(pll->on);
3665
		WARN_ON(pll->active);
3899
		WARN_ON(pll->active);
3666
	}
3900
	}
3667
 
3901
 
3668
	crtc->config.shared_dpll = DPLL_ID_PRIVATE;
3902
	crtc->config.shared_dpll = DPLL_ID_PRIVATE;
3669
}
3903
}
3670
 
3904
 
3671
struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
3905
struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
3672
{
3906
{
3673
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
3907
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
3674
	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
3908
	struct intel_shared_dpll *pll;
3675
	enum intel_dpll_id i;
3909
	enum intel_dpll_id i;
3676
 
-
 
3677
	if (pll) {
-
 
3678
		DRM_DEBUG_KMS("CRTC:%d dropping existing %s\n",
-
 
3679
			      crtc->base.base.id, pll->name);
-
 
3680
		intel_put_shared_dpll(crtc);
-
 
3681
	}
-
 
3682
 
3910
 
3683
	if (HAS_PCH_IBX(dev_priv->dev)) {
3911
	if (HAS_PCH_IBX(dev_priv->dev)) {
3684
		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
3912
		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
3685
		i = (enum intel_dpll_id) crtc->pipe;
3913
		i = (enum intel_dpll_id) crtc->pipe;
3686
		pll = &dev_priv->shared_dplls[i];
3914
		pll = &dev_priv->shared_dplls[i];
3687
 
3915
 
3688
		DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
3916
		DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
3689
			      crtc->base.base.id, pll->name);
3917
			      crtc->base.base.id, pll->name);
3690
 
3918
 
3691
		WARN_ON(pll->refcount);
3919
		WARN_ON(pll->new_config->crtc_mask);
3692
 
3920
 
3693
		goto found;
3921
		goto found;
3694
	}
3922
	}
3695
 
3923
 
3696
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
3924
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
3697
		pll = &dev_priv->shared_dplls[i];
3925
		pll = &dev_priv->shared_dplls[i];
3698
 
3926
 
3699
		/* Only want to check enabled timings first */
3927
		/* Only want to check enabled timings first */
3700
		if (pll->refcount == 0)
3928
		if (pll->new_config->crtc_mask == 0)
3701
			continue;
3929
			continue;
3702
 
3930
 
-
 
3931
		if (memcmp(&crtc->new_config->dpll_hw_state,
3703
		if (memcmp(&crtc->config.dpll_hw_state, &pll->hw_state,
3932
			   &pll->new_config->hw_state,
3704
			   sizeof(pll->hw_state)) == 0) {
3933
			   sizeof(pll->new_config->hw_state)) == 0) {
3705
			DRM_DEBUG_KMS("CRTC:%d sharing existing %s (refcount %d, active %d)\n",
3934
			DRM_DEBUG_KMS("CRTC:%d sharing existing %s (crtc mask 0x%08x, active %d)\n",
3706
				      crtc->base.base.id,
3935
				      crtc->base.base.id, pll->name,
3707
				      pll->name, pll->refcount, pll->active);
-
 
-
 
3936
				      pll->new_config->crtc_mask,
3708
 
3937
				      pll->active);
3709
			goto found;
3938
			goto found;
3710
		}
3939
		}
3711
	}
3940
	}
3712
 
3941
 
3713
	/* Ok no matching timings, maybe there's a free one? */
3942
	/* Ok no matching timings, maybe there's a free one? */
3714
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
3943
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
3715
		pll = &dev_priv->shared_dplls[i];
3944
		pll = &dev_priv->shared_dplls[i];
3716
		if (pll->refcount == 0) {
3945
		if (pll->new_config->crtc_mask == 0) {
3717
			DRM_DEBUG_KMS("CRTC:%d allocated %s\n",
3946
			DRM_DEBUG_KMS("CRTC:%d allocated %s\n",
3718
				      crtc->base.base.id, pll->name);
3947
				      crtc->base.base.id, pll->name);
3719
			goto found;
3948
			goto found;
3720
		}
3949
		}
3721
	}
3950
	}
3722
 
3951
 
3723
	return NULL;
3952
	return NULL;
3724
 
3953
 
3725
found:
3954
found:
3726
	if (pll->refcount == 0)
3955
	if (pll->new_config->crtc_mask == 0)
3727
		pll->hw_state = crtc->config.dpll_hw_state;
3956
		pll->new_config->hw_state = crtc->new_config->dpll_hw_state;
3728
 
3957
 
3729
	crtc->config.shared_dpll = i;
3958
	crtc->new_config->shared_dpll = i;
3730
	DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
3959
	DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
3731
			 pipe_name(crtc->pipe));
3960
			 pipe_name(crtc->pipe));
3732
 
3961
 
3733
	pll->refcount++;
3962
	pll->new_config->crtc_mask |= 1 << crtc->pipe;
3734
 
3963
 
3735
	return pll;
3964
	return pll;
3736
}
3965
}
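/*
 * Standalone sketch (illustration only, not driver code): this revision
 * replaces the per-PLL refcount with a crtc_mask in which each pipe owns one
 * bit, so a PLL is free again once the mask reads zero.  The demo_* names
 * below are invented; u32/bool/enum pipe come from headers already included.
 */
static inline void demo_pll_take(u32 *crtc_mask, enum pipe pipe)
{
	*crtc_mask |= 1 << pipe;		/* pipe now references this PLL */
}

static inline void demo_pll_put(u32 *crtc_mask, enum pipe pipe)
{
	*crtc_mask &= ~(1 << pipe);		/* drop this pipe's reference */
}

static inline bool demo_pll_unused(u32 crtc_mask)
{
	return crtc_mask == 0;			/* the old "refcount == 0" test */
}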
-
 
3966
 
-
 
3967
/**
-
 
3968
 * intel_shared_dpll_start_config - start a new PLL staged config
-
 
3969
 * @dev_priv: DRM device
-
 
3970
 * @clear_pipes: mask of pipes that will have their PLLs freed
-
 
3971
 *
-
 
3972
 * Starts a new PLL staged config, copying the current config but
-
 
3973
 * releasing the references of pipes specified in clear_pipes.
-
 
3974
 */
-
 
3975
static int intel_shared_dpll_start_config(struct drm_i915_private *dev_priv,
-
 
3976
					  unsigned clear_pipes)
-
 
3977
{
-
 
3978
	struct intel_shared_dpll *pll;
-
 
3979
	enum intel_dpll_id i;
-
 
3980
 
-
 
3981
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
-
 
3982
		pll = &dev_priv->shared_dplls[i];
-
 
3983
 
-
 
3984
		pll->new_config = kmemdup(&pll->config, sizeof pll->config,
-
 
3985
					  GFP_KERNEL);
-
 
3986
		if (!pll->new_config)
-
 
3987
			goto cleanup;
-
 
3988
 
-
 
3989
		pll->new_config->crtc_mask &= ~clear_pipes;
-
 
3990
	}
-
 
3991
 
-
 
3992
	return 0;
-
 
3993
 
-
 
3994
cleanup:
-
 
3995
	while (--i >= 0) {
-
 
3996
		pll = &dev_priv->shared_dplls[i];
-
 
3997
		kfree(pll->new_config);
-
 
3998
		pll->new_config = NULL;
-
 
3999
	}
-
 
4000
 
-
 
4001
	return -ENOMEM;
-
 
4002
}
-
 
4003
 
-
 
4004
static void intel_shared_dpll_commit(struct drm_i915_private *dev_priv)
-
 
4005
{
-
 
4006
	struct intel_shared_dpll *pll;
-
 
4007
	enum intel_dpll_id i;
-
 
4008
 
-
 
4009
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
-
 
4010
		pll = &dev_priv->shared_dplls[i];
-
 
4011
 
-
 
4012
		WARN_ON(pll->new_config == &pll->config);
-
 
4013
 
-
 
4014
		pll->config = *pll->new_config;
-
 
4015
		kfree(pll->new_config);
-
 
4016
		pll->new_config = NULL;
-
 
4017
	}
-
 
4018
}
-
 
4019
 
-
 
4020
static void intel_shared_dpll_abort_config(struct drm_i915_private *dev_priv)
-
 
4021
{
-
 
4022
	struct intel_shared_dpll *pll;
-
 
4023
	enum intel_dpll_id i;
-
 
4024
 
-
 
4025
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
-
 
4026
		pll = &dev_priv->shared_dplls[i];
-
 
4027
 
-
 
4028
		WARN_ON(pll->new_config == &pll->config);
-
 
4029
 
-
 
4030
		kfree(pll->new_config);
-
 
4031
		pll->new_config = NULL;
-
 
4032
	}
-
 
4033
}
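/*
 * Caller-side flow sketch (assumed, illustration only): the three staged
 * config helpers above are meant to bracket a modeset computation - copy the
 * live PLL state, let intel_get_shared_dpll() work on the copies, then either
 * commit them or throw them away:
 *
 *	if (intel_shared_dpll_start_config(dev_priv, modeset_pipes))
 *		return -ENOMEM;
 *	... assign PLLs with intel_get_shared_dpll() for each modeset crtc ...
 *	if (computation_succeeded)
 *		intel_shared_dpll_commit(dev_priv);
 *	else
 *		intel_shared_dpll_abort_config(dev_priv);
 */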
3737
 
4034
 
3738
static void cpt_verify_modeset(struct drm_device *dev, int pipe)
4035
static void cpt_verify_modeset(struct drm_device *dev, int pipe)
3739
{
4036
{
3740
	struct drm_i915_private *dev_priv = dev->dev_private;
4037
	struct drm_i915_private *dev_priv = dev->dev_private;
3741
	int dslreg = PIPEDSL(pipe);
4038
	int dslreg = PIPEDSL(pipe);
3742
	u32 temp;
4039
	u32 temp;
3743
 
4040
 
3744
	temp = I915_READ(dslreg);
4041
	temp = I915_READ(dslreg);
3745
	udelay(500);
4042
	udelay(500);
3746
	if (wait_for(I915_READ(dslreg) != temp, 5)) {
4043
	if (wait_for(I915_READ(dslreg) != temp, 5)) {
3747
		if (wait_for(I915_READ(dslreg) != temp, 5))
4044
		if (wait_for(I915_READ(dslreg) != temp, 5))
3748
			DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
4045
			DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
3749
	}
4046
	}
3750
}
4047
}
-
 
4048
 
-
 
4049
static void skylake_pfit_enable(struct intel_crtc *crtc)
-
 
4050
{
-
 
4051
	struct drm_device *dev = crtc->base.dev;
-
 
4052
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
4053
	int pipe = crtc->pipe;
-
 
4054
 
-
 
4055
	if (crtc->config.pch_pfit.enabled) {
-
 
4056
		I915_WRITE(PS_CTL(pipe), PS_ENABLE);
-
 
4057
		I915_WRITE(PS_WIN_POS(pipe), crtc->config.pch_pfit.pos);
-
 
4058
		I915_WRITE(PS_WIN_SZ(pipe), crtc->config.pch_pfit.size);
-
 
4059
	}
-
 
4060
}
3751
 
4061
 
3752
static void ironlake_pfit_enable(struct intel_crtc *crtc)
4062
static void ironlake_pfit_enable(struct intel_crtc *crtc)
3753
{
4063
{
3754
	struct drm_device *dev = crtc->base.dev;
4064
	struct drm_device *dev = crtc->base.dev;
3755
	struct drm_i915_private *dev_priv = dev->dev_private;
4065
	struct drm_i915_private *dev_priv = dev->dev_private;
3756
	int pipe = crtc->pipe;
4066
	int pipe = crtc->pipe;
3757
 
4067
 
3758
	if (crtc->config.pch_pfit.enabled) {
4068
	if (crtc->config.pch_pfit.enabled) {
3759
		/* Force use of hard-coded filter coefficients
4069
		/* Force use of hard-coded filter coefficients
3760
		 * as some pre-programmed values are broken,
4070
		 * as some pre-programmed values are broken,
3761
		 * e.g. x201.
4071
		 * e.g. x201.
3762
		 */
4072
		 */
3763
		if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
4073
		if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
3764
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
4074
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
3765
						 PF_PIPE_SEL_IVB(pipe));
4075
						 PF_PIPE_SEL_IVB(pipe));
3766
		else
4076
		else
3767
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
4077
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
3768
		I915_WRITE(PF_WIN_POS(pipe), crtc->config.pch_pfit.pos);
4078
		I915_WRITE(PF_WIN_POS(pipe), crtc->config.pch_pfit.pos);
3769
		I915_WRITE(PF_WIN_SZ(pipe), crtc->config.pch_pfit.size);
4079
		I915_WRITE(PF_WIN_SZ(pipe), crtc->config.pch_pfit.size);
3770
	}
4080
	}
3771
}
4081
}
3772
 
4082
 
3773
static void intel_enable_planes(struct drm_crtc *crtc)
4083
static void intel_enable_planes(struct drm_crtc *crtc)
3774
{
4084
{
3775
	struct drm_device *dev = crtc->dev;
4085
	struct drm_device *dev = crtc->dev;
3776
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
4086
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
3777
	struct drm_plane *plane;
4087
	struct drm_plane *plane;
3778
	struct intel_plane *intel_plane;
4088
	struct intel_plane *intel_plane;
3779
 
4089
 
3780
	drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
4090
	drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
3781
		intel_plane = to_intel_plane(plane);
4091
		intel_plane = to_intel_plane(plane);
3782
		if (intel_plane->pipe == pipe)
4092
		if (intel_plane->pipe == pipe)
3783
			intel_plane_restore(&intel_plane->base);
4093
			intel_plane_restore(&intel_plane->base);
3784
	}
4094
	}
3785
}
4095
}
3786
 
4096
 
3787
static void intel_disable_planes(struct drm_crtc *crtc)
4097
static void intel_disable_planes(struct drm_crtc *crtc)
3788
{
4098
{
3789
	struct drm_device *dev = crtc->dev;
4099
	struct drm_device *dev = crtc->dev;
3790
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
4100
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
3791
	struct drm_plane *plane;
4101
	struct drm_plane *plane;
3792
	struct intel_plane *intel_plane;
4102
	struct intel_plane *intel_plane;
3793
 
4103
 
3794
	drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
4104
	drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
3795
		intel_plane = to_intel_plane(plane);
4105
		intel_plane = to_intel_plane(plane);
3796
		if (intel_plane->pipe == pipe)
4106
		if (intel_plane->pipe == pipe)
3797
			intel_plane_disable(&intel_plane->base);
4107
			intel_plane_disable(&intel_plane->base);
3798
	}
4108
	}
3799
}
4109
}
3800
 
4110
 
3801
void hsw_enable_ips(struct intel_crtc *crtc)
4111
void hsw_enable_ips(struct intel_crtc *crtc)
3802
{
4112
{
3803
	struct drm_device *dev = crtc->base.dev;
4113
	struct drm_device *dev = crtc->base.dev;
3804
	struct drm_i915_private *dev_priv = dev->dev_private;
4114
	struct drm_i915_private *dev_priv = dev->dev_private;
3805
 
4115
 
3806
	if (!crtc->config.ips_enabled)
4116
	if (!crtc->config.ips_enabled)
3807
		return;
4117
		return;
3808
 
4118
 
3809
	/* We can only enable IPS after we enable a plane and wait for a vblank */
4119
	/* We can only enable IPS after we enable a plane and wait for a vblank */
3810
	intel_wait_for_vblank(dev, crtc->pipe);
4120
	intel_wait_for_vblank(dev, crtc->pipe);
3811
 
4121
 
3812
	assert_plane_enabled(dev_priv, crtc->plane);
4122
	assert_plane_enabled(dev_priv, crtc->plane);
3813
	if (IS_BROADWELL(dev)) {
4123
	if (IS_BROADWELL(dev)) {
3814
		mutex_lock(&dev_priv->rps.hw_lock);
4124
		mutex_lock(&dev_priv->rps.hw_lock);
3815
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000));
4125
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000));
3816
		mutex_unlock(&dev_priv->rps.hw_lock);
4126
		mutex_unlock(&dev_priv->rps.hw_lock);
3817
		/* Quoting Art Runyan: "its not safe to expect any particular
4127
		/* Quoting Art Runyan: "its not safe to expect any particular
3818
		 * value in IPS_CTL bit 31 after enabling IPS through the
4128
		 * value in IPS_CTL bit 31 after enabling IPS through the
3819
		 * mailbox." Moreover, the mailbox may return a bogus state,
4129
		 * mailbox." Moreover, the mailbox may return a bogus state,
3820
		 * so we need to just enable it and continue on.
4130
		 * so we need to just enable it and continue on.
3821
		 */
4131
		 */
3822
	} else {
4132
	} else {
3823
		I915_WRITE(IPS_CTL, IPS_ENABLE);
4133
		I915_WRITE(IPS_CTL, IPS_ENABLE);
3824
		/* The bit only becomes 1 in the next vblank, so this wait here
4134
		/* The bit only becomes 1 in the next vblank, so this wait here
3825
		 * is essentially intel_wait_for_vblank. If we don't have this
4135
		 * is essentially intel_wait_for_vblank. If we don't have this
3826
		 * and don't wait for vblanks until the end of crtc_enable, then
4136
		 * and don't wait for vblanks until the end of crtc_enable, then
3827
		 * the HW state readout code will complain that the expected
4137
		 * the HW state readout code will complain that the expected
3828
		 * IPS_CTL value is not the one we read. */
4138
		 * IPS_CTL value is not the one we read. */
3829
		if (wait_for(I915_READ_NOTRACE(IPS_CTL) & IPS_ENABLE, 50))
4139
		if (wait_for(I915_READ_NOTRACE(IPS_CTL) & IPS_ENABLE, 50))
3830
			DRM_ERROR("Timed out waiting for IPS enable\n");
4140
			DRM_ERROR("Timed out waiting for IPS enable\n");
3831
	}
4141
	}
3832
}
4142
}
3833
 
4143
 
3834
void hsw_disable_ips(struct intel_crtc *crtc)
4144
void hsw_disable_ips(struct intel_crtc *crtc)
3835
{
4145
{
3836
	struct drm_device *dev = crtc->base.dev;
4146
	struct drm_device *dev = crtc->base.dev;
3837
	struct drm_i915_private *dev_priv = dev->dev_private;
4147
	struct drm_i915_private *dev_priv = dev->dev_private;
3838
 
4148
 
3839
	if (!crtc->config.ips_enabled)
4149
	if (!crtc->config.ips_enabled)
3840
		return;
4150
		return;
3841
 
4151
 
3842
	assert_plane_enabled(dev_priv, crtc->plane);
4152
	assert_plane_enabled(dev_priv, crtc->plane);
3843
	if (IS_BROADWELL(dev)) {
4153
	if (IS_BROADWELL(dev)) {
3844
		mutex_lock(&dev_priv->rps.hw_lock);
4154
		mutex_lock(&dev_priv->rps.hw_lock);
3845
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
4155
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
3846
		mutex_unlock(&dev_priv->rps.hw_lock);
4156
		mutex_unlock(&dev_priv->rps.hw_lock);
3847
		/* wait for pcode to finish disabling IPS, which may take up to 42ms */
4157
		/* wait for pcode to finish disabling IPS, which may take up to 42ms */
3848
		if (wait_for((I915_READ(IPS_CTL) & IPS_ENABLE) == 0, 42))
4158
		if (wait_for((I915_READ(IPS_CTL) & IPS_ENABLE) == 0, 42))
3849
			DRM_ERROR("Timed out waiting for IPS disable\n");
4159
			DRM_ERROR("Timed out waiting for IPS disable\n");
3850
	} else {
4160
	} else {
3851
		I915_WRITE(IPS_CTL, 0);
4161
		I915_WRITE(IPS_CTL, 0);
3852
		POSTING_READ(IPS_CTL);
4162
		POSTING_READ(IPS_CTL);
3853
	}
4163
	}
3854
 
4164
 
3855
	/* We need to wait for a vblank before we can disable the plane. */
4165
	/* We need to wait for a vblank before we can disable the plane. */
3856
	intel_wait_for_vblank(dev, crtc->pipe);
4166
	intel_wait_for_vblank(dev, crtc->pipe);
3857
}
4167
}
3858
 
4168
 
3859
/** Loads the palette/gamma unit for the CRTC with the prepared values */
4169
/** Loads the palette/gamma unit for the CRTC with the prepared values */
3860
static void intel_crtc_load_lut(struct drm_crtc *crtc)
4170
static void intel_crtc_load_lut(struct drm_crtc *crtc)
3861
{
4171
{
3862
	struct drm_device *dev = crtc->dev;
4172
	struct drm_device *dev = crtc->dev;
3863
	struct drm_i915_private *dev_priv = dev->dev_private;
4173
	struct drm_i915_private *dev_priv = dev->dev_private;
3864
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4174
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3865
	enum pipe pipe = intel_crtc->pipe;
4175
	enum pipe pipe = intel_crtc->pipe;
3866
	int palreg = PALETTE(pipe);
4176
	int palreg = PALETTE(pipe);
3867
	int i;
4177
	int i;
3868
	bool reenable_ips = false;
4178
	bool reenable_ips = false;
3869
 
4179
 
3870
	/* The clocks have to be on to load the palette. */
4180
	/* The clocks have to be on to load the palette. */
3871
	if (!crtc->enabled || !intel_crtc->active)
4181
	if (!crtc->enabled || !intel_crtc->active)
3872
		return;
4182
		return;
3873
 
4183
 
3874
	if (!HAS_PCH_SPLIT(dev_priv->dev)) {
4184
	if (!HAS_PCH_SPLIT(dev_priv->dev)) {
3875
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
4185
		if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI))
3876
			assert_dsi_pll_enabled(dev_priv);
4186
			assert_dsi_pll_enabled(dev_priv);
3877
		else
4187
		else
3878
			assert_pll_enabled(dev_priv, pipe);
4188
			assert_pll_enabled(dev_priv, pipe);
3879
	}
4189
	}
3880
 
4190
 
3881
	/* use legacy palette for Ironlake */
4191
	/* use legacy palette for Ironlake */
3882
	if (!HAS_GMCH_DISPLAY(dev))
4192
	if (!HAS_GMCH_DISPLAY(dev))
3883
		palreg = LGC_PALETTE(pipe);
4193
		palreg = LGC_PALETTE(pipe);
3884
 
4194
 
3885
	/* Workaround : Do not read or write the pipe palette/gamma data while
4195
	/* Workaround : Do not read or write the pipe palette/gamma data while
3886
	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
4196
	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
3887
	 */
4197
	 */
3888
	if (IS_HASWELL(dev) && intel_crtc->config.ips_enabled &&
4198
	if (IS_HASWELL(dev) && intel_crtc->config.ips_enabled &&
3889
	    ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
4199
	    ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
3890
	     GAMMA_MODE_MODE_SPLIT)) {
4200
	     GAMMA_MODE_MODE_SPLIT)) {
3891
		hsw_disable_ips(intel_crtc);
4201
		hsw_disable_ips(intel_crtc);
3892
		reenable_ips = true;
4202
		reenable_ips = true;
3893
	}
4203
	}
3894
 
4204
 
3895
	for (i = 0; i < 256; i++) {
4205
	for (i = 0; i < 256; i++) {
3896
		I915_WRITE(palreg + 4 * i,
4206
		I915_WRITE(palreg + 4 * i,
3897
			   (intel_crtc->lut_r[i] << 16) |
4207
			   (intel_crtc->lut_r[i] << 16) |
3898
			   (intel_crtc->lut_g[i] << 8) |
4208
			   (intel_crtc->lut_g[i] << 8) |
3899
			   intel_crtc->lut_b[i]);
4209
			   intel_crtc->lut_b[i]);
3900
	}
4210
	}
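	/*
	 * Note: the loop above packs each 8-bit-per-channel gamma entry as
	 * 0x00RRGGBB, e.g. lut_r = lut_g = lut_b = 0xff stores 0x00ffffff.
	 */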
3901
 
4211
 
3902
	if (reenable_ips)
4212
	if (reenable_ips)
3903
		hsw_enable_ips(intel_crtc);
4213
		hsw_enable_ips(intel_crtc);
3904
}
4214
}
3905
 
4215
 
3906
static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
4216
static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
3907
{
4217
{
3908
	if (!enable && intel_crtc->overlay) {
4218
	if (!enable && intel_crtc->overlay) {
3909
		struct drm_device *dev = intel_crtc->base.dev;
4219
		struct drm_device *dev = intel_crtc->base.dev;
3910
		struct drm_i915_private *dev_priv = dev->dev_private;
4220
		struct drm_i915_private *dev_priv = dev->dev_private;
3911
 
4221
 
3912
		mutex_lock(&dev->struct_mutex);
4222
		mutex_lock(&dev->struct_mutex);
3913
		dev_priv->mm.interruptible = false;
4223
		dev_priv->mm.interruptible = false;
-
 
4224
//       (void) intel_overlay_switch_off(intel_crtc->overlay);
3914
        dev_priv->mm.interruptible = true;
4225
        dev_priv->mm.interruptible = true;
3915
		mutex_unlock(&dev->struct_mutex);
4226
		mutex_unlock(&dev->struct_mutex);
3916
	}
4227
	}
3917
 
4228
 
3918
	/* Let userspace switch the overlay on again. In most cases userspace
4229
	/* Let userspace switch the overlay on again. In most cases userspace
3919
	 * has to recompute where to put it anyway.
4230
	 * has to recompute where to put it anyway.
3920
	 */
4231
	 */
3921
}
4232
}
3922
 
4233
 
3923
static void intel_crtc_enable_planes(struct drm_crtc *crtc)
4234
static void intel_crtc_enable_planes(struct drm_crtc *crtc)
3924
{
4235
{
3925
	struct drm_device *dev = crtc->dev;
4236
	struct drm_device *dev = crtc->dev;
3926
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
3927
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4237
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3928
	int pipe = intel_crtc->pipe;
4238
	int pipe = intel_crtc->pipe;
3929
	int plane = intel_crtc->plane;
-
 
3930
 
-
 
3931
	drm_vblank_on(dev, pipe);
-
 
3932
 
4239
 
3933
	intel_enable_primary_hw_plane(dev_priv, plane, pipe);
4240
	intel_enable_primary_hw_plane(crtc->primary, crtc);
3934
	intel_enable_planes(crtc);
4241
	intel_enable_planes(crtc);
3935
	intel_crtc_update_cursor(crtc, true);
4242
	intel_crtc_update_cursor(crtc, true);
3936
	intel_crtc_dpms_overlay(intel_crtc, true);
4243
	intel_crtc_dpms_overlay(intel_crtc, true);
3937
 
4244
 
3938
	hsw_enable_ips(intel_crtc);
4245
	hsw_enable_ips(intel_crtc);
3939
 
4246
 
3940
	mutex_lock(&dev->struct_mutex);
4247
	mutex_lock(&dev->struct_mutex);
3941
	intel_update_fbc(dev);
4248
	intel_update_fbc(dev);
3942
	mutex_unlock(&dev->struct_mutex);
4249
	mutex_unlock(&dev->struct_mutex);
-
 
4250
 
-
 
4251
	/*
-
 
4252
	 * FIXME: Once we grow proper nuclear flip support out of this we need
-
 
4253
	 * to compute the mask of flip planes precisely. For the time being
-
 
4254
	 * consider this a flip from a NULL plane.
-
 
4255
	 */
-
 
4256
	intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
3943
}
4257
}
3944
 
4258
 
3945
static void intel_crtc_disable_planes(struct drm_crtc *crtc)
4259
static void intel_crtc_disable_planes(struct drm_crtc *crtc)
3946
{
4260
{
3947
	struct drm_device *dev = crtc->dev;
4261
	struct drm_device *dev = crtc->dev;
3948
	struct drm_i915_private *dev_priv = dev->dev_private;
4262
	struct drm_i915_private *dev_priv = dev->dev_private;
3949
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4263
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3950
	int pipe = intel_crtc->pipe;
4264
	int pipe = intel_crtc->pipe;
3951
	int plane = intel_crtc->plane;
4265
	int plane = intel_crtc->plane;
3952
 
4266
 
3953
 
4267
 
3954
	if (dev_priv->fbc.plane == plane)
4268
	if (dev_priv->fbc.plane == plane)
3955
		intel_disable_fbc(dev);
4269
		intel_disable_fbc(dev);
3956
 
4270
 
3957
	hsw_disable_ips(intel_crtc);
4271
	hsw_disable_ips(intel_crtc);
3958
 
4272
 
3959
	intel_crtc_dpms_overlay(intel_crtc, false);
4273
	intel_crtc_dpms_overlay(intel_crtc, false);
3960
	intel_crtc_update_cursor(crtc, false);
4274
	intel_crtc_update_cursor(crtc, false);
3961
	intel_disable_planes(crtc);
4275
	intel_disable_planes(crtc);
3962
	intel_disable_primary_hw_plane(dev_priv, plane, pipe);
4276
	intel_disable_primary_hw_plane(crtc->primary, crtc);
-
 
4277
 
-
 
4278
	/*
-
 
4279
	 * FIXME: Once we grow proper nuclear flip support out of this we need
-
 
4280
	 * to compute the mask of flip planes precisely. For the time being
3963
	drm_vblank_off(dev, pipe);
4281
	 * consider this a flip to a NULL plane.
-
 
4282
	 */
-
 
4283
//	intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
3964
}
4284
}
3965
 
4285
 
3966
static void ironlake_crtc_enable(struct drm_crtc *crtc)
4286
static void ironlake_crtc_enable(struct drm_crtc *crtc)
3967
{
4287
{
3968
    struct drm_device *dev = crtc->dev;
4288
    struct drm_device *dev = crtc->dev;
3969
    struct drm_i915_private *dev_priv = dev->dev_private;
4289
    struct drm_i915_private *dev_priv = dev->dev_private;
3970
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4290
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3971
	struct intel_encoder *encoder;
4291
	struct intel_encoder *encoder;
3972
    int pipe = intel_crtc->pipe;
4292
    int pipe = intel_crtc->pipe;
3973
	enum plane plane = intel_crtc->plane;
-
 
3974
 
4293
 
3975
	WARN_ON(!crtc->enabled);
4294
	WARN_ON(!crtc->enabled);
3976
 
4295
 
3977
    if (intel_crtc->active)
4296
    if (intel_crtc->active)
3978
        return;
4297
        return;
3979
 
4298
 
3980
	if (intel_crtc->config.has_pch_encoder)
4299
	if (intel_crtc->config.has_pch_encoder)
3981
		intel_prepare_shared_dpll(intel_crtc);
4300
		intel_prepare_shared_dpll(intel_crtc);
3982
 
4301
 
3983
	if (intel_crtc->config.has_dp_encoder)
4302
	if (intel_crtc->config.has_dp_encoder)
3984
		intel_dp_set_m_n(intel_crtc);
4303
		intel_dp_set_m_n(intel_crtc);
3985
 
4304
 
3986
	intel_set_pipe_timings(intel_crtc);
4305
	intel_set_pipe_timings(intel_crtc);
3987
 
4306
 
3988
	if (intel_crtc->config.has_pch_encoder) {
4307
	if (intel_crtc->config.has_pch_encoder) {
3989
		intel_cpu_transcoder_set_m_n(intel_crtc,
4308
		intel_cpu_transcoder_set_m_n(intel_crtc,
3990
					     &intel_crtc->config.fdi_m_n);
4309
				     &intel_crtc->config.fdi_m_n, NULL);
3991
	}
4310
	}
3992
 
4311
 
3993
	ironlake_set_pipeconf(crtc);
4312
	ironlake_set_pipeconf(crtc);
3994
 
-
 
3995
	/* Set up the display plane register */
-
 
3996
	I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
-
 
3997
	POSTING_READ(DSPCNTR(plane));
-
 
3998
 
-
 
3999
	dev_priv->display.update_primary_plane(crtc, crtc->primary->fb,
-
 
4000
					       crtc->x, crtc->y);
-
 
4001
 
4313
 
4002
    intel_crtc->active = true;
4314
    intel_crtc->active = true;
4003
 
4315
 
4004
	intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
4316
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4005
	intel_set_pch_fifo_underrun_reporting(dev, pipe, true);
4317
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
4006
 
4318
 
4007
	for_each_encoder_on_crtc(dev, crtc, encoder)
4319
	for_each_encoder_on_crtc(dev, crtc, encoder)
4008
		if (encoder->pre_enable)
4320
		if (encoder->pre_enable)
4009
			encoder->pre_enable(encoder);
4321
			encoder->pre_enable(encoder);
4010
 
4322
 
4011
	if (intel_crtc->config.has_pch_encoder) {
4323
	if (intel_crtc->config.has_pch_encoder) {
4012
		/* Note: FDI PLL enabling _must_ be done before we enable the
4324
		/* Note: FDI PLL enabling _must_ be done before we enable the
4013
		 * cpu pipes, hence this is separate from all the other fdi/pch
4325
		 * cpu pipes, hence this is separate from all the other fdi/pch
4014
		 * enabling. */
4326
		 * enabling. */
4015
		ironlake_fdi_pll_enable(intel_crtc);
4327
		ironlake_fdi_pll_enable(intel_crtc);
4016
	} else {
4328
	} else {
4017
		assert_fdi_tx_disabled(dev_priv, pipe);
4329
		assert_fdi_tx_disabled(dev_priv, pipe);
4018
		assert_fdi_rx_disabled(dev_priv, pipe);
4330
		assert_fdi_rx_disabled(dev_priv, pipe);
4019
	}
4331
	}
4020
 
4332
 
4021
	ironlake_pfit_enable(intel_crtc);
4333
	ironlake_pfit_enable(intel_crtc);
4022
 
4334
 
4023
    /*
4335
    /*
4024
     * On ILK+ LUT must be loaded before the pipe is running but with
4336
     * On ILK+ LUT must be loaded before the pipe is running but with
4025
     * clocks enabled
4337
     * clocks enabled
4026
     */
4338
     */
4027
    intel_crtc_load_lut(crtc);
4339
    intel_crtc_load_lut(crtc);
4028
 
4340
 
4029
	intel_update_watermarks(crtc);
4341
	intel_update_watermarks(crtc);
4030
	intel_enable_pipe(intel_crtc);
4342
	intel_enable_pipe(intel_crtc);
4031
 
4343
 
4032
	if (intel_crtc->config.has_pch_encoder)
4344
	if (intel_crtc->config.has_pch_encoder)
4033
        ironlake_pch_enable(crtc);
4345
        ironlake_pch_enable(crtc);
4034
 
4346
 
4035
	for_each_encoder_on_crtc(dev, crtc, encoder)
4347
	for_each_encoder_on_crtc(dev, crtc, encoder)
4036
		encoder->enable(encoder);
4348
		encoder->enable(encoder);
4037
 
4349
 
4038
	if (HAS_PCH_CPT(dev))
4350
	if (HAS_PCH_CPT(dev))
4039
		cpt_verify_modeset(dev, intel_crtc->pipe);
4351
		cpt_verify_modeset(dev, intel_crtc->pipe);
-
 
4352
 
-
 
4353
	assert_vblank_disabled(crtc);
-
 
4354
	drm_crtc_vblank_on(crtc);
4040
 
4355
 
4041
	intel_crtc_enable_planes(crtc);
4356
	intel_crtc_enable_planes(crtc);
4042
}
4357
}
4043
 
4358
 
4044
/* IPS only exists on ULT machines and is tied to pipe A. */
4359
/* IPS only exists on ULT machines and is tied to pipe A. */
4045
static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
4360
static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
4046
{
4361
{
4047
	return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
4362
	return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
4048
}
4363
}
4049
 
4364
 
4050
/*
4365
/*
4051
 * This implements the workaround described in the "notes" section of the mode
4366
 * This implements the workaround described in the "notes" section of the mode
4052
 * set sequence documentation. When going from no pipes or single pipe to
4367
 * set sequence documentation. When going from no pipes or single pipe to
4053
 * multiple pipes, and planes are enabled after the pipe, we need to wait at
4368
 * multiple pipes, and planes are enabled after the pipe, we need to wait at
4054
 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
4369
 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
4055
 */
4370
 */
4056
static void haswell_mode_set_planes_workaround(struct intel_crtc *crtc)
4371
static void haswell_mode_set_planes_workaround(struct intel_crtc *crtc)
4057
{
4372
{
4058
	struct drm_device *dev = crtc->base.dev;
4373
	struct drm_device *dev = crtc->base.dev;
4059
	struct intel_crtc *crtc_it, *other_active_crtc = NULL;
4374
	struct intel_crtc *crtc_it, *other_active_crtc = NULL;
4060
 
4375
 
4061
	/* We want to get the other_active_crtc only if there's only 1 other
4376
	/* We want to get the other_active_crtc only if there's only 1 other
4062
	 * active crtc. */
4377
	 * active crtc. */
4063
	for_each_intel_crtc(dev, crtc_it) {
4378
	for_each_intel_crtc(dev, crtc_it) {
4064
		if (!crtc_it->active || crtc_it == crtc)
4379
		if (!crtc_it->active || crtc_it == crtc)
4065
			continue;
4380
			continue;
4066
 
4381
 
4067
		if (other_active_crtc)
4382
		if (other_active_crtc)
4068
			return;
4383
			return;
4069
 
4384
 
4070
		other_active_crtc = crtc_it;
4385
		other_active_crtc = crtc_it;
4071
	}
4386
	}
4072
	if (!other_active_crtc)
4387
	if (!other_active_crtc)
4073
		return;
4388
		return;
4074
 
4389
 
4075
	intel_wait_for_vblank(dev, other_active_crtc->pipe);
4390
	intel_wait_for_vblank(dev, other_active_crtc->pipe);
4076
	intel_wait_for_vblank(dev, other_active_crtc->pipe);
4391
	intel_wait_for_vblank(dev, other_active_crtc->pipe);
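	/*
	 * Note: the two back-to-back waits above implement the "at least
	 * 2 vblanks" requirement described at the top of this function.
	 */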
4077
}
4392
}
4078
 
4393
 
4079
static void haswell_crtc_enable(struct drm_crtc *crtc)
4394
static void haswell_crtc_enable(struct drm_crtc *crtc)
4080
{
4395
{
4081
	struct drm_device *dev = crtc->dev;
4396
	struct drm_device *dev = crtc->dev;
4082
	struct drm_i915_private *dev_priv = dev->dev_private;
4397
	struct drm_i915_private *dev_priv = dev->dev_private;
4083
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4398
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4084
	struct intel_encoder *encoder;
4399
	struct intel_encoder *encoder;
4085
	int pipe = intel_crtc->pipe;
4400
	int pipe = intel_crtc->pipe;
4086
	enum plane plane = intel_crtc->plane;
-
 
4087
 
4401
 
4088
	WARN_ON(!crtc->enabled);
4402
	WARN_ON(!crtc->enabled);
4089
 
4403
 
4090
	if (intel_crtc->active)
4404
	if (intel_crtc->active)
4091
		return;
4405
		return;
4092
 
4406
 
4093
	if (intel_crtc_to_shared_dpll(intel_crtc))
4407
	if (intel_crtc_to_shared_dpll(intel_crtc))
4094
		intel_enable_shared_dpll(intel_crtc);
4408
		intel_enable_shared_dpll(intel_crtc);
4095
 
4409
 
4096
	if (intel_crtc->config.has_dp_encoder)
4410
	if (intel_crtc->config.has_dp_encoder)
4097
		intel_dp_set_m_n(intel_crtc);
4411
		intel_dp_set_m_n(intel_crtc);
4098
 
4412
 
4099
	intel_set_pipe_timings(intel_crtc);
4413
	intel_set_pipe_timings(intel_crtc);
-
 
4414
 
-
 
4415
	if (intel_crtc->config.cpu_transcoder != TRANSCODER_EDP) {
-
 
4416
		I915_WRITE(PIPE_MULT(intel_crtc->config.cpu_transcoder),
-
 
4417
			   intel_crtc->config.pixel_multiplier - 1);
-
 
4418
	}
4100
 
4419
 
4101
	if (intel_crtc->config.has_pch_encoder) {
4420
	if (intel_crtc->config.has_pch_encoder) {
4102
		intel_cpu_transcoder_set_m_n(intel_crtc,
4421
		intel_cpu_transcoder_set_m_n(intel_crtc,
4103
					     &intel_crtc->config.fdi_m_n);
4422
				     &intel_crtc->config.fdi_m_n, NULL);
4104
	}
4423
	}
4105
 
4424
 
4106
	haswell_set_pipeconf(crtc);
4425
	haswell_set_pipeconf(crtc);
4107
 
4426
 
4108
	intel_set_pipe_csc(crtc);
4427
	intel_set_pipe_csc(crtc);
4109
 
-
 
4110
	/* Set up the display plane register */
-
 
4111
	I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE | DISPPLANE_PIPE_CSC_ENABLE);
-
 
4112
	POSTING_READ(DSPCNTR(plane));
-
 
4113
 
-
 
4114
	dev_priv->display.update_primary_plane(crtc, crtc->primary->fb,
-
 
4115
					       crtc->x, crtc->y);
-
 
4116
 
4428
 
4117
	intel_crtc->active = true;
4429
	intel_crtc->active = true;
4118
 
4430
 
4119
	intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
4431
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4120
	for_each_encoder_on_crtc(dev, crtc, encoder)
4432
	for_each_encoder_on_crtc(dev, crtc, encoder)
4121
		if (encoder->pre_enable)
4433
		if (encoder->pre_enable)
4122
			encoder->pre_enable(encoder);
4434
			encoder->pre_enable(encoder);
4123
 
4435
 
4124
	if (intel_crtc->config.has_pch_encoder) {
4436
	if (intel_crtc->config.has_pch_encoder) {
4125
		intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
4437
		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
-
 
4438
						      true);
4126
		dev_priv->display.fdi_link_train(crtc);
4439
		dev_priv->display.fdi_link_train(crtc);
4127
	}
4440
	}
4128
 
4441
 
4129
	intel_ddi_enable_pipe_clock(intel_crtc);
4442
	intel_ddi_enable_pipe_clock(intel_crtc);
-
 
4443
 
-
 
4444
	if (IS_SKYLAKE(dev))
-
 
4445
		skylake_pfit_enable(intel_crtc);
4130
 
4446
	else
4131
	ironlake_pfit_enable(intel_crtc);
4447
	ironlake_pfit_enable(intel_crtc);
4132
 
4448
 
4133
	/*
4449
	/*
4134
	 * On ILK+ LUT must be loaded before the pipe is running but with
4450
	 * On ILK+ LUT must be loaded before the pipe is running but with
4135
	 * clocks enabled
4451
	 * clocks enabled
4136
	 */
4452
	 */
4137
	intel_crtc_load_lut(crtc);
4453
	intel_crtc_load_lut(crtc);
4138
 
4454
 
4139
	intel_ddi_set_pipe_settings(crtc);
4455
	intel_ddi_set_pipe_settings(crtc);
4140
	intel_ddi_enable_transcoder_func(crtc);
4456
	intel_ddi_enable_transcoder_func(crtc);
4141
 
4457
 
4142
	intel_update_watermarks(crtc);
4458
	intel_update_watermarks(crtc);
4143
	intel_enable_pipe(intel_crtc);
4459
	intel_enable_pipe(intel_crtc);
4144
 
4460
 
4145
	if (intel_crtc->config.has_pch_encoder)
4461
	if (intel_crtc->config.has_pch_encoder)
4146
		lpt_pch_enable(crtc);
4462
		lpt_pch_enable(crtc);
4147
 
4463
 
4148
	if (intel_crtc->config.dp_encoder_is_mst)
4464
	if (intel_crtc->config.dp_encoder_is_mst)
4149
		intel_ddi_set_vc_payload_alloc(crtc, true);
4465
		intel_ddi_set_vc_payload_alloc(crtc, true);
4150
 
4466
 
4151
	for_each_encoder_on_crtc(dev, crtc, encoder) {
4467
	for_each_encoder_on_crtc(dev, crtc, encoder) {
4152
		encoder->enable(encoder);
4468
		encoder->enable(encoder);
4153
		intel_opregion_notify_encoder(encoder, true);
4469
		intel_opregion_notify_encoder(encoder, true);
4154
	}
4470
	}
-
 
4471
 
-
 
4472
	assert_vblank_disabled(crtc);
-
 
4473
	drm_crtc_vblank_on(crtc);
4155
 
4474
 
4156
	/* If we change the relative order between pipe/planes enabling, we need
4475
	/* If we change the relative order between pipe/planes enabling, we need
4157
	 * to change the workaround. */
4476
	 * to change the workaround. */
4158
	haswell_mode_set_planes_workaround(intel_crtc);
4477
	haswell_mode_set_planes_workaround(intel_crtc);
4159
	intel_crtc_enable_planes(crtc);
4478
	intel_crtc_enable_planes(crtc);
4160
}
4479
}
+
+static void skylake_pfit_disable(struct intel_crtc *crtc)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int pipe = crtc->pipe;
+
+	/* To avoid upsetting the power well on haswell only disable the pfit if
+	 * it's in use. The hw state code will make sure we get this right. */
+	if (crtc->config.pch_pfit.enabled) {
+		I915_WRITE(PS_CTL(pipe), 0);
+		I915_WRITE(PS_WIN_POS(pipe), 0);
+		I915_WRITE(PS_WIN_SZ(pipe), 0);
+	}
+}

static void ironlake_pfit_disable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;

	/* To avoid upsetting the power well on haswell only disable the pfit if
	 * it's in use. The hw state code will make sure we get this right. */
	if (crtc->config.pch_pfit.enabled) {
		I915_WRITE(PF_CTL(pipe), 0);
		I915_WRITE(PF_WIN_POS(pipe), 0);
		I915_WRITE(PF_WIN_SZ(pipe), 0);
	}
}

static void ironlake_crtc_disable(struct drm_crtc *crtc)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
    int pipe = intel_crtc->pipe;
    u32 reg, temp;

    if (!intel_crtc->active)
        return;

	intel_crtc_disable_planes(crtc);
+
+	drm_crtc_vblank_off(crtc);
+	assert_vblank_disabled(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->disable(encoder);

	if (intel_crtc->config.has_pch_encoder)
-		intel_set_pch_fifo_underrun_reporting(dev, pipe, false);
+		intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

-    intel_disable_pipe(dev_priv, pipe);
+	intel_disable_pipe(intel_crtc);
+
	ironlake_pfit_disable(intel_crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->post_disable)
			encoder->post_disable(encoder);

	if (intel_crtc->config.has_pch_encoder) {
    ironlake_fdi_disable(crtc);

	ironlake_disable_pch_transcoder(dev_priv, pipe);
-		intel_set_pch_fifo_underrun_reporting(dev, pipe, true);

    if (HAS_PCH_CPT(dev)) {
        /* disable TRANS_DP_CTL */
        reg = TRANS_DP_CTL(pipe);
        temp = I915_READ(reg);
			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
				  TRANS_DP_PORT_SEL_MASK);
        temp |= TRANS_DP_PORT_SEL_NONE;
        I915_WRITE(reg, temp);

        /* disable DPLL_SEL */
        temp = I915_READ(PCH_DPLL_SEL);
			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
        I915_WRITE(PCH_DPLL_SEL, temp);
    }

    /* disable PCH DPLL */
		intel_disable_shared_dpll(intel_crtc);

	ironlake_fdi_pll_disable(intel_crtc);
	}

    intel_crtc->active = false;
	intel_update_watermarks(crtc);

    mutex_lock(&dev->struct_mutex);
    intel_update_fbc(dev);
    mutex_unlock(&dev->struct_mutex);
}

static void haswell_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
-	int pipe = intel_crtc->pipe;
	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;

	if (!intel_crtc->active)
		return;

	intel_crtc_disable_planes(crtc);
+
+	drm_crtc_vblank_off(crtc);
+	assert_vblank_disabled(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		intel_opregion_notify_encoder(encoder, false);
		encoder->disable(encoder);
	}

	if (intel_crtc->config.has_pch_encoder)
-		intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, false);
+		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
+						      false);
-	intel_disable_pipe(dev_priv, pipe);
+	intel_disable_pipe(intel_crtc);

	if (intel_crtc->config.dp_encoder_is_mst)
		intel_ddi_set_vc_payload_alloc(crtc, false);

	intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
+
+	if (IS_SKYLAKE(dev))
+		skylake_pfit_disable(intel_crtc);
-
+	else
	ironlake_pfit_disable(intel_crtc);

	intel_ddi_disable_pipe_clock(intel_crtc);

	if (intel_crtc->config.has_pch_encoder) {
		lpt_disable_pch_transcoder(dev_priv);
-		intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
		intel_ddi_fdi_disable(crtc);
	}

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->post_disable)
			encoder->post_disable(encoder);

	intel_crtc->active = false;
	intel_update_watermarks(crtc);

	mutex_lock(&dev->struct_mutex);
	intel_update_fbc(dev);
	mutex_unlock(&dev->struct_mutex);

	if (intel_crtc_to_shared_dpll(intel_crtc))
		intel_disable_shared_dpll(intel_crtc);
}

static void ironlake_crtc_off(struct drm_crtc *crtc)
{
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	intel_put_shared_dpll(intel_crtc);
}


static void i9xx_pfit_enable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc_config *pipe_config = &crtc->config;

	if (!crtc->config.gmch_pfit.control)
		return;

	/*
	 * The panel fitter should only be adjusted whilst the pipe is disabled,
	 * according to register description and PRM.
	 */
	WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
	assert_pipe_disabled(dev_priv, crtc->pipe);

	I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
	I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);

	/* Border color in case we don't scale up to the full screen. Black by
	 * default, change to something else for debugging. */
	I915_WRITE(BCLRPAT(crtc->pipe), 0);
}

static enum intel_display_power_domain port_to_power_domain(enum port port)
{
	switch (port) {
	case PORT_A:
		return POWER_DOMAIN_PORT_DDI_A_4_LANES;
	case PORT_B:
		return POWER_DOMAIN_PORT_DDI_B_4_LANES;
	case PORT_C:
		return POWER_DOMAIN_PORT_DDI_C_4_LANES;
	case PORT_D:
		return POWER_DOMAIN_PORT_DDI_D_4_LANES;
	default:
		WARN_ON_ONCE(1);
		return POWER_DOMAIN_PORT_OTHER;
	}
}

#define for_each_power_domain(domain, mask)				\
	for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++)	\
		if ((1 << (domain)) & (mask))

enum intel_display_power_domain
intel_display_port_power_domain(struct intel_encoder *intel_encoder)
{
	struct drm_device *dev = intel_encoder->base.dev;
	struct intel_digital_port *intel_dig_port;

	switch (intel_encoder->type) {
	case INTEL_OUTPUT_UNKNOWN:
		/* Only DDI platforms should ever use this output type */
		WARN_ON_ONCE(!HAS_DDI(dev));
	case INTEL_OUTPUT_DISPLAYPORT:
	case INTEL_OUTPUT_HDMI:
	case INTEL_OUTPUT_EDP:
		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
		return port_to_power_domain(intel_dig_port->port);
	case INTEL_OUTPUT_DP_MST:
		intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
		return port_to_power_domain(intel_dig_port->port);
	case INTEL_OUTPUT_ANALOG:
		return POWER_DOMAIN_PORT_CRT;
	case INTEL_OUTPUT_DSI:
		return POWER_DOMAIN_PORT_DSI;
	default:
		return POWER_DOMAIN_PORT_OTHER;
	}
}

static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct intel_encoder *intel_encoder;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	unsigned long mask;
	enum transcoder transcoder;

	transcoder = intel_pipe_to_cpu_transcoder(dev->dev_private, pipe);

	mask = BIT(POWER_DOMAIN_PIPE(pipe));
	mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
	if (intel_crtc->config.pch_pfit.enabled ||
	    intel_crtc->config.pch_pfit.force_thru)
		mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));

	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
		mask |= BIT(intel_display_port_power_domain(intel_encoder));

	return mask;
}
-
-void intel_display_set_init_power(struct drm_i915_private *dev_priv,
-				  bool enable)
-{
-	if (dev_priv->power_domains.init_power_on == enable)
-		return;
-
-	if (enable)
-		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
-	else
-		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
-
-	dev_priv->power_domains.init_power_on = enable;
-}

static void modeset_update_crtc_power_domains(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long pipe_domains[I915_MAX_PIPES] = { 0, };
	struct intel_crtc *crtc;

	/*
	 * First get all needed power domains, then put all unneeded, to avoid
	 * any unnecessary toggling of the power wells.
	 */
	for_each_intel_crtc(dev, crtc) {
		enum intel_display_power_domain domain;

		if (!crtc->base.enabled)
			continue;

		pipe_domains[crtc->pipe] = get_crtc_power_domains(&crtc->base);

		for_each_power_domain(domain, pipe_domains[crtc->pipe])
			intel_display_power_get(dev_priv, domain);
	}
+
+	if (dev_priv->display.modeset_global_resources)
+		dev_priv->display.modeset_global_resources(dev);

	for_each_intel_crtc(dev, crtc) {
		enum intel_display_power_domain domain;

		for_each_power_domain(domain, crtc->enabled_power_domains)
			intel_display_power_put(dev_priv, domain);

		crtc->enabled_power_domains = pipe_domains[crtc->pipe];
	}

	intel_display_set_init_power(dev_priv, false);
}

/* returns HPLL frequency in kHz */
static int valleyview_get_vco(struct drm_i915_private *dev_priv)
{
	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };

	/* Obtain SKU information */
	mutex_lock(&dev_priv->dpio_lock);
	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
		CCK_FUSE_HPLL_FREQ_MASK;
	mutex_unlock(&dev_priv->dpio_lock);

	return vco_freq[hpll_freq] * 1000;
}

static void vlv_update_cdclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->vlv_cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
-	DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz",
+	DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
			 dev_priv->vlv_cdclk_freq);

	/*
	 * Program the gmbus_freq based on the cdclk frequency.
	 * BSpec erroneously claims we should aim for 4MHz, but
	 * in fact 1MHz is the correct frequency.
	 */
-	I915_WRITE(GMBUSFREQ_VLV, dev_priv->vlv_cdclk_freq);
+	I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->vlv_cdclk_freq, 1000));
}

/* Adjust CDclk dividers to allow high res or save power if possible */
static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, cmd;

	WARN_ON(dev_priv->display.get_display_clock_speed(dev) != dev_priv->vlv_cdclk_freq);

	if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
		cmd = 2;
	else if (cdclk == 266667)
		cmd = 1;
	else
		cmd = 0;

	mutex_lock(&dev_priv->rps.hw_lock);
	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	val &= ~DSPFREQGUAR_MASK;
	val |= (cmd << DSPFREQGUAR_SHIFT);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
		      DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
		     50)) {
		DRM_ERROR("timed out waiting for CDclk change\n");
	}
	mutex_unlock(&dev_priv->rps.hw_lock);

	if (cdclk == 400000) {
-		u32 divider, vco;
+		u32 divider;

-		vco = valleyview_get_vco(dev_priv);
-		divider = DIV_ROUND_CLOSEST(vco << 1, cdclk) - 1;
+		divider = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;

		mutex_lock(&dev_priv->dpio_lock);
		/* adjust cdclk divider */
		val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
		val &= ~DISPLAY_FREQUENCY_VALUES;
		val |= divider;
		vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);

		if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) &
			      DISPLAY_FREQUENCY_STATUS) == (divider << DISPLAY_FREQUENCY_STATUS_SHIFT),
			     50))
			DRM_ERROR("timed out waiting for CDclk change\n");
		mutex_unlock(&dev_priv->dpio_lock);
	}

	mutex_lock(&dev_priv->dpio_lock);
	/* adjust self-refresh exit latency value */
	val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
	val &= ~0x7f;

	/*
	 * For high bandwidth configs, we set a higher latency in the bunit
	 * so that the core display fetch happens in time to avoid underruns.
	 */
	if (cdclk == 400000)
		val |= 4500 / 250; /* 4.5 usec */
	else
		val |= 3000 / 250; /* 3.0 usec */
	vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);
	mutex_unlock(&dev_priv->dpio_lock);

	vlv_update_cdclk(dev);
}
+
+static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 val, cmd;
+
+	WARN_ON(dev_priv->display.get_display_clock_speed(dev) != dev_priv->vlv_cdclk_freq);
+
+	switch (cdclk) {
+	case 400000:
+		cmd = 3;
+		break;
+	case 333333:
+	case 320000:
+		cmd = 2;
+		break;
+	case 266667:
+		cmd = 1;
+		break;
+	case 200000:
+		cmd = 0;
+		break;
+	default:
+		WARN_ON(1);
+		return;
+	}
+
+	mutex_lock(&dev_priv->rps.hw_lock);
+	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
+	val &= ~DSPFREQGUAR_MASK_CHV;
+	val |= (cmd << DSPFREQGUAR_SHIFT_CHV);
+	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
+	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
+		      DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV),
+		     50)) {
+		DRM_ERROR("timed out waiting for CDclk change\n");
+	}
+	mutex_unlock(&dev_priv->rps.hw_lock);
+
+	vlv_update_cdclk(dev);
+}

static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
				 int max_pixclk)
{
-	int vco = valleyview_get_vco(dev_priv);
-	int freq_320 = (vco <<  1) % 320000 != 0 ? 333333 : 320000;
+	int freq_320 = (dev_priv->hpll_freq <<  1) % 320000 != 0 ? 333333 : 320000;
+
+	/* FIXME: Punit isn't quite ready yet */
+	if (IS_CHERRYVIEW(dev_priv->dev))
+		return 400000;

	/*
	 * Really only a few cases to deal with, as only 4 CDclks are supported:
	 *   200MHz
	 *   267MHz
	 *   320/333MHz (depends on HPLL freq)
	 *   400MHz
	 * So we check to see whether we're above 90% of the lower bin and
	 * adjust if needed.
	 *
	 * We seem to get an unstable or solid color picture at 200MHz.
	 * Not sure what's wrong. For now use 200MHz only when all pipes
	 * are off.
	 */
	if (max_pixclk > freq_320*9/10)
		return 400000;
	else if (max_pixclk > 266667*9/10)
		return freq_320;
	else if (max_pixclk > 0)
		return 266667;
	else
		return 200000;
}

/* compute the max pixel clock for new configuration */
static int intel_mode_max_pixclk(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_crtc *intel_crtc;
	int max_pixclk = 0;

	for_each_intel_crtc(dev, intel_crtc) {
		if (intel_crtc->new_enabled)
			max_pixclk = max(max_pixclk,
					 intel_crtc->new_config->adjusted_mode.crtc_clock);
	}

	return max_pixclk;
}

static void valleyview_modeset_global_pipes(struct drm_device *dev,
					    unsigned *prepare_pipes)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc;
	int max_pixclk = intel_mode_max_pixclk(dev_priv);

	if (valleyview_calc_cdclk(dev_priv, max_pixclk) ==
	    dev_priv->vlv_cdclk_freq)
		return;

	/* disable/enable all currently active pipes while we change cdclk */
	for_each_intel_crtc(dev, intel_crtc)
		if (intel_crtc->base.enabled)
			*prepare_pipes |= (1 << intel_crtc->pipe);
}

static void valleyview_modeset_global_resources(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int max_pixclk = intel_mode_max_pixclk(dev_priv);
	int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);

-	if (req_cdclk != dev_priv->vlv_cdclk_freq)
-		valleyview_set_cdclk(dev, req_cdclk);
-	modeset_update_crtc_power_domains(dev);
+	if (req_cdclk != dev_priv->vlv_cdclk_freq) {
+		/*
+		 * FIXME: We can end up here with all power domains off, yet
+		 * with a CDCLK frequency other than the minimum. To account
+		 * for this take the PIPE-A power domain, which covers the HW
+		 * blocks needed for the following programming. This can be
+		 * removed once it's guaranteed that we get here either with
+		 * the minimum CDCLK set, or the required power domains
+		 * enabled.
+		 */
+		intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
+
+		if (IS_CHERRYVIEW(dev))
+			cherryview_set_cdclk(dev, req_cdclk);
+		else
+		valleyview_set_cdclk(dev, req_cdclk);
+
+		intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
+	}
}

static void valleyview_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;
-	int plane = intel_crtc->plane;
	bool is_dsi;
-	u32 dspcntr;

	WARN_ON(!crtc->enabled);

	if (intel_crtc->active)
		return;

-	is_dsi = intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI);
-
-	if (!is_dsi && !IS_CHERRYVIEW(dev))
-	vlv_prepare_pll(intel_crtc);
-
-	/* Set up the display plane register */
-	dspcntr = DISPPLANE_GAMMA_ENABLE;
+	is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI);
+
+	if (!is_dsi) {
+		if (IS_CHERRYVIEW(dev))
+			chv_prepare_pll(intel_crtc, &intel_crtc->config);
+		else
+			vlv_prepare_pll(intel_crtc, &intel_crtc->config);
+	}

	if (intel_crtc->config.has_dp_encoder)
		intel_dp_set_m_n(intel_crtc);

	intel_set_pipe_timings(intel_crtc);

-	/* pipesrc and dspsize control the size that is scaled from,
-	 * which should always be the user's requested size.
-	 */
-	I915_WRITE(DSPSIZE(plane),
-		   ((intel_crtc->config.pipe_src_h - 1) << 16) |
-		   (intel_crtc->config.pipe_src_w - 1));
-	I915_WRITE(DSPPOS(plane), 0);
-
-	i9xx_set_pipeconf(intel_crtc);
-
-	I915_WRITE(DSPCNTR(plane), dspcntr);
-	POSTING_READ(DSPCNTR(plane));
-
-	dev_priv->display.update_primary_plane(crtc, crtc->primary->fb,
-					       crtc->x, crtc->y);
+	if (IS_CHERRYVIEW(dev) && pipe == PIPE_B) {
+		struct drm_i915_private *dev_priv = dev->dev_private;
+
+		I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
+		I915_WRITE(CHV_CANVAS(pipe), 0);
+	}
+
+	i9xx_set_pipeconf(intel_crtc);

	intel_crtc->active = true;

-	intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
+	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_pll_enable)
			encoder->pre_pll_enable(encoder);

	if (!is_dsi) {
		if (IS_CHERRYVIEW(dev))
-			chv_enable_pll(intel_crtc);
+			chv_enable_pll(intel_crtc, &intel_crtc->config);
		else
-	vlv_enable_pll(intel_crtc);
+			vlv_enable_pll(intel_crtc, &intel_crtc->config);
	}

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);

	i9xx_pfit_enable(intel_crtc);

	intel_crtc_load_lut(crtc);

	intel_update_watermarks(crtc);
	intel_enable_pipe(intel_crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->enable(encoder);
+
+	assert_vblank_disabled(crtc);
+	drm_crtc_vblank_on(crtc);

	intel_crtc_enable_planes(crtc);

	/* Underruns don't raise interrupts, so check manually. */
-	i9xx_check_fifo_underruns(dev);
+	i9xx_check_fifo_underruns(dev_priv);
}

static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(FP0(crtc->pipe), crtc->config.dpll_hw_state.fp0);
	I915_WRITE(FP1(crtc->pipe), crtc->config.dpll_hw_state.fp1);
}

static void i9xx_crtc_enable(struct drm_crtc *crtc)
{
    struct drm_device *dev = crtc->dev;
-    struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
    int pipe = intel_crtc->pipe;
-    int plane = intel_crtc->plane;
-	u32 dspcntr;

	WARN_ON(!crtc->enabled);

    if (intel_crtc->active)
        return;

	i9xx_set_pll_dividers(intel_crtc);
-
-	/* Set up the display plane register */
-	dspcntr = DISPPLANE_GAMMA_ENABLE;
-
-	if (pipe == 0)
-		dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
-	else
-		dspcntr |= DISPPLANE_SEL_PIPE_B;

	if (intel_crtc->config.has_dp_encoder)
		intel_dp_set_m_n(intel_crtc);

	intel_set_pipe_timings(intel_crtc);

-	/* pipesrc and dspsize control the size that is scaled from,
-	 * which should always be the user's requested size.
-	 */
-	I915_WRITE(DSPSIZE(plane),
-		   ((intel_crtc->config.pipe_src_h - 1) << 16) |
-		   (intel_crtc->config.pipe_src_w - 1));
-	I915_WRITE(DSPPOS(plane), 0);

-	i9xx_set_pipeconf(intel_crtc);
-
-	I915_WRITE(DSPCNTR(plane), dspcntr);
-	POSTING_READ(DSPCNTR(plane));
-
-	dev_priv->display.update_primary_plane(crtc, crtc->primary->fb,
-					       crtc->x, crtc->y);
+	i9xx_set_pipeconf(intel_crtc);

    intel_crtc->active = true;

	if (!IS_GEN2(dev))
-		intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
+		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);

	i9xx_enable_pll(intel_crtc);

	i9xx_pfit_enable(intel_crtc);

	intel_crtc_load_lut(crtc);

	intel_update_watermarks(crtc);
	intel_enable_pipe(intel_crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->enable(encoder);
+
+	assert_vblank_disabled(crtc);
+	drm_crtc_vblank_on(crtc);

	intel_crtc_enable_planes(crtc);

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So don't enable underrun reporting before at least some planes
	 * are enabled.
	 * FIXME: Need to fix the logic to work when we turn off all planes
	 * but leave the pipe running.
	 */
	if (IS_GEN2(dev))
-	intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
+		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	/* Underruns don't raise interrupts, so check manually. */
-	i9xx_check_fifo_underruns(dev);
+	i9xx_check_fifo_underruns(dev_priv);
}

static void i9xx_pfit_disable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!crtc->config.gmch_pfit.control)
		return;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n",
			 I915_READ(PFIT_CONTROL));
		I915_WRITE(PFIT_CONTROL, 0);
}

static void i9xx_crtc_disable(struct drm_crtc *crtc)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
    int pipe = intel_crtc->pipe;

    if (!intel_crtc->active)
        return;

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 * FIXME: Need to fix the logic to work when we turn off all planes
	 * but leave the pipe running.
	 */
	if (IS_GEN2(dev))
-		intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
+		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	intel_set_memory_cxsr(dev_priv, false);
	intel_crtc_disable_planes(crtc);
-
-	for_each_encoder_on_crtc(dev, crtc, encoder)
-		encoder->disable(encoder);

	/*
	 * On gen2 planes are double buffered but the pipe isn't, so we must
	 * wait for planes to fully turn off before disabling the pipe.
	 * We also need to wait on all gmch platforms because of the
	 * self-refresh mode constraint explained above.
	 */
		intel_wait_for_vblank(dev, pipe);
+
+	drm_crtc_vblank_off(crtc);
+	assert_vblank_disabled(crtc);
+
+	for_each_encoder_on_crtc(dev, crtc, encoder)
+		encoder->disable(encoder);

-    intel_disable_pipe(dev_priv, pipe);
+	intel_disable_pipe(intel_crtc);

	i9xx_pfit_disable(intel_crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->post_disable)
			encoder->post_disable(encoder);

-	if (!intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI)) {
+	if (!intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI)) {
		if (IS_CHERRYVIEW(dev))
			chv_disable_pll(dev_priv, pipe);
		else if (IS_VALLEYVIEW(dev))
		vlv_disable_pll(dev_priv, pipe);
		else
-	i9xx_disable_pll(dev_priv, pipe);
+			i9xx_disable_pll(intel_crtc);
	}

	if (!IS_GEN2(dev))
-		intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
+		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

    intel_crtc->active = false;
	intel_update_watermarks(crtc);

	mutex_lock(&dev->struct_mutex);
    intel_update_fbc(dev);
	mutex_unlock(&dev->struct_mutex);
}

static void i9xx_crtc_off(struct drm_crtc *crtc)
{
}
-
-static void intel_crtc_update_sarea(struct drm_crtc *crtc,
-				    bool enabled)
-{
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_master_private *master_priv;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	int pipe = intel_crtc->pipe;
-
-
-#if 0
-	if (!dev->primary->master)
-		return;
-
-	master_priv = dev->primary->master->driver_priv;
-	if (!master_priv->sarea_priv)
-		return;
-
-	switch (pipe) {
-	case 0:
-		master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
-		master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0;
-		break;
-	case 1:
-		master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0;
-		master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
-		break;
-	default:
-		DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe));
-		break;
-	}
-#endif
-}

/* Master function to enable/disable CRTC and corresponding power wells */
void intel_crtc_control(struct drm_crtc *crtc, bool enable)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum intel_display_power_domain domain;
	unsigned long domains;

	if (enable) {
		if (!intel_crtc->active) {
			domains = get_crtc_power_domains(crtc);
			for_each_power_domain(domain, domains)
				intel_display_power_get(dev_priv, domain);
			intel_crtc->enabled_power_domains = domains;

			dev_priv->display.crtc_enable(crtc);
		}
	} else {
		if (intel_crtc->active) {
			dev_priv->display.crtc_disable(crtc);

			domains = intel_crtc->enabled_power_domains;
			for_each_power_domain(domain, domains)
				intel_display_power_put(dev_priv, domain);
			intel_crtc->enabled_power_domains = 0;
		}
	}
}

/**
 * Sets the power management mode of the pipe and plane.
 */
void intel_crtc_update_dpms(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct intel_encoder *intel_encoder;
	bool enable = false;

	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
		enable |= intel_encoder->connectors_active;

	intel_crtc_control(crtc, enable);
-
-	intel_crtc_update_sarea(crtc, enable);
}

static void intel_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_connector *connector;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *old_obj = intel_fb_obj(crtc->primary->fb);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;

	/* crtc should still be enabled when we disable it. */
	WARN_ON(!crtc->enabled);

	dev_priv->display.crtc_disable(crtc);
-	intel_crtc_update_sarea(crtc, false);
	dev_priv->display.off(crtc);

	if (crtc->primary->fb) {
		mutex_lock(&dev->struct_mutex);
		intel_unpin_fb_obj(old_obj);
		i915_gem_track_fb(old_obj, NULL,
				  INTEL_FRONTBUFFER_PRIMARY(pipe));
		mutex_unlock(&dev->struct_mutex);
		crtc->primary->fb = NULL;
	}

	/* Update computed state. */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		if (!connector->encoder || !connector->encoder->crtc)
			continue;

		if (connector->encoder->crtc != crtc)
			continue;

		connector->dpms = DRM_MODE_DPMS_OFF;
		to_intel_encoder(connector->encoder)->connectors_active = false;
	}
}

void intel_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

	drm_encoder_cleanup(encoder);
	kfree(intel_encoder);
}

/* Simple dpms helper for encoders with just one connector, no cloning and only
 * one kind of off state. It clamps all !ON modes to fully OFF and changes the
 * state of the entire output pipe. */
static void intel_encoder_dpms(struct intel_encoder *encoder, int mode)
{
	if (mode == DRM_MODE_DPMS_ON) {
		encoder->connectors_active = true;

		intel_crtc_update_dpms(encoder->base.crtc);
	} else {
		encoder->connectors_active = false;

		intel_crtc_update_dpms(encoder->base.crtc);
	}
}

/* Cross check the actual hw state with our own modeset state tracking (and its
 * internal consistency). */
static void intel_connector_check_state(struct intel_connector *connector)
{
	if (connector->get_hw_state(connector)) {
		struct intel_encoder *encoder = connector->encoder;
		struct drm_crtc *crtc;
		bool encoder_enabled;
		enum pipe pipe;

		DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
			      connector->base.base.id,
5356
			      connector->base.base.id,
5026
			      connector->base.name);
5357
			      connector->base.name);
5027
 
5358
 
5028
		/* there is no real hw state for MST connectors */
5359
		/* there is no real hw state for MST connectors */
5029
		if (connector->mst_port)
5360
		if (connector->mst_port)
5030
			return;
5361
			return;
5031
 
5362
 
5032
		WARN(connector->base.dpms == DRM_MODE_DPMS_OFF,
5363
		WARN(connector->base.dpms == DRM_MODE_DPMS_OFF,
5033
		     "wrong connector dpms state\n");
5364
		     "wrong connector dpms state\n");
5034
		WARN(connector->base.encoder != &encoder->base,
5365
		WARN(connector->base.encoder != &encoder->base,
5035
		     "active connector not linked to encoder\n");
5366
		     "active connector not linked to encoder\n");
5036
 
5367
 
5037
		if (encoder) {
5368
		if (encoder) {
5038
		WARN(!encoder->connectors_active,
5369
		WARN(!encoder->connectors_active,
5039
		     "encoder->connectors_active not set\n");
5370
		     "encoder->connectors_active not set\n");
5040
 
5371
 
5041
		encoder_enabled = encoder->get_hw_state(encoder, &pipe);
5372
		encoder_enabled = encoder->get_hw_state(encoder, &pipe);
5042
		WARN(!encoder_enabled, "encoder not enabled\n");
5373
		WARN(!encoder_enabled, "encoder not enabled\n");
5043
		if (WARN_ON(!encoder->base.crtc))
5374
		if (WARN_ON(!encoder->base.crtc))
5044
			return;
5375
			return;
5045
 
5376
 
5046
		crtc = encoder->base.crtc;
5377
		crtc = encoder->base.crtc;
5047
 
5378
 
5048
		WARN(!crtc->enabled, "crtc not enabled\n");
5379
		WARN(!crtc->enabled, "crtc not enabled\n");
5049
		WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
5380
		WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
5050
		WARN(pipe != to_intel_crtc(crtc)->pipe,
5381
		WARN(pipe != to_intel_crtc(crtc)->pipe,
5051
		     "encoder active on the wrong pipe\n");
5382
		     "encoder active on the wrong pipe\n");
5052
	}
5383
	}
5053
	}
5384
	}
5054
}
5385
}
5055
 
5386
 
5056
/* Even simpler default implementation, if there's really no special case to
5387
/* Even simpler default implementation, if there's really no special case to
5057
 * consider. */
5388
 * consider. */
5058
void intel_connector_dpms(struct drm_connector *connector, int mode)
5389
void intel_connector_dpms(struct drm_connector *connector, int mode)
5059
{
5390
{
5060
	/* All the simple cases only support two dpms states. */
5391
	/* All the simple cases only support two dpms states. */
5061
	if (mode != DRM_MODE_DPMS_ON)
5392
	if (mode != DRM_MODE_DPMS_ON)
5062
		mode = DRM_MODE_DPMS_OFF;
5393
		mode = DRM_MODE_DPMS_OFF;
5063
 
5394
 
5064
	if (mode == connector->dpms)
5395
	if (mode == connector->dpms)
5065
		return;
5396
		return;
5066
 
5397
 
5067
	connector->dpms = mode;
5398
	connector->dpms = mode;
5068
 
5399
 
5069
	/* Only need to change hw state when actually enabled */
5400
	/* Only need to change hw state when actually enabled */
5070
	if (connector->encoder)
5401
	if (connector->encoder)
5071
		intel_encoder_dpms(to_intel_encoder(connector->encoder), mode);
5402
		intel_encoder_dpms(to_intel_encoder(connector->encoder), mode);
5072
 
5403
 
5073
	intel_modeset_check_state(connector->dev);
5404
	intel_modeset_check_state(connector->dev);
5074
}
5405
}
5075
 
5406
 
5076
/* Simple connector->get_hw_state implementation for encoders that support only
5407
/* Simple connector->get_hw_state implementation for encoders that support only
5077
 * one connector and no cloning and hence the encoder state determines the state
5408
 * one connector and no cloning and hence the encoder state determines the state
5078
 * of the connector. */
5409
 * of the connector. */
5079
bool intel_connector_get_hw_state(struct intel_connector *connector)
5410
bool intel_connector_get_hw_state(struct intel_connector *connector)
5080
{
5411
{
5081
	enum pipe pipe = 0;
5412
	enum pipe pipe = 0;
5082
	struct intel_encoder *encoder = connector->encoder;
5413
	struct intel_encoder *encoder = connector->encoder;
5083
 
5414
 
5084
	return encoder->get_hw_state(encoder, &pipe);
5415
	return encoder->get_hw_state(encoder, &pipe);
5085
}
5416
}
5086
 
5417
 
5087
static bool ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
5418
static bool ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
5088
				     struct intel_crtc_config *pipe_config)
5419
				     struct intel_crtc_config *pipe_config)
5089
{
5420
{
5090
	struct drm_i915_private *dev_priv = dev->dev_private;
5421
	struct drm_i915_private *dev_priv = dev->dev_private;
5091
	struct intel_crtc *pipe_B_crtc =
5422
	struct intel_crtc *pipe_B_crtc =
5092
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
5423
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
5093
 
5424
 
5094
	DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
5425
	DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
5095
		      pipe_name(pipe), pipe_config->fdi_lanes);
5426
		      pipe_name(pipe), pipe_config->fdi_lanes);
5096
	if (pipe_config->fdi_lanes > 4) {
5427
	if (pipe_config->fdi_lanes > 4) {
5097
		DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
5428
		DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
5098
			      pipe_name(pipe), pipe_config->fdi_lanes);
5429
			      pipe_name(pipe), pipe_config->fdi_lanes);
5099
		return false;
5430
		return false;
5100
	}
5431
	}
5101
 
5432
 
5102
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
5433
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
5103
		if (pipe_config->fdi_lanes > 2) {
5434
		if (pipe_config->fdi_lanes > 2) {
5104
			DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
5435
			DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
5105
				      pipe_config->fdi_lanes);
5436
				      pipe_config->fdi_lanes);
5106
			return false;
5437
			return false;
5107
		} else {
5438
		} else {
5108
			return true;
5439
			return true;
5109
		}
5440
		}
5110
	}
5441
	}
5111
 
5442
 
5112
	if (INTEL_INFO(dev)->num_pipes == 2)
5443
	if (INTEL_INFO(dev)->num_pipes == 2)
5113
		return true;
5444
		return true;
5114
 
5445
 
5115
	/* Ivybridge 3 pipe is really complicated */
5446
	/* Ivybridge 3 pipe is really complicated */
5116
	switch (pipe) {
5447
	switch (pipe) {
5117
	case PIPE_A:
5448
	case PIPE_A:
5118
		return true;
5449
		return true;
5119
	case PIPE_B:
5450
	case PIPE_B:
5120
		if (dev_priv->pipe_to_crtc_mapping[PIPE_C]->enabled &&
5451
		if (dev_priv->pipe_to_crtc_mapping[PIPE_C]->enabled &&
5121
		    pipe_config->fdi_lanes > 2) {
5452
		    pipe_config->fdi_lanes > 2) {
5122
			DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
5453
			DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
5123
				      pipe_name(pipe), pipe_config->fdi_lanes);
5454
				      pipe_name(pipe), pipe_config->fdi_lanes);
5124
			return false;
5455
			return false;
5125
		}
5456
		}
5126
		return true;
5457
		return true;
5127
	case PIPE_C:
5458
	case PIPE_C:
5128
		if (!pipe_has_enabled_pch(pipe_B_crtc) ||
5459
		if (!pipe_has_enabled_pch(pipe_B_crtc) ||
5129
		    pipe_B_crtc->config.fdi_lanes <= 2) {
5460
		    pipe_B_crtc->config.fdi_lanes <= 2) {
5130
			if (pipe_config->fdi_lanes > 2) {
5461
			if (pipe_config->fdi_lanes > 2) {
5131
				DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
5462
				DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
5132
					      pipe_name(pipe), pipe_config->fdi_lanes);
5463
					      pipe_name(pipe), pipe_config->fdi_lanes);
5133
				return false;
5464
				return false;
5134
			}
5465
			}
5135
		} else {
5466
		} else {
5136
			DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
5467
			DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
5137
			return false;
5468
			return false;
5138
		}
5469
		}
5139
		return true;
5470
		return true;
5140
	default:
5471
	default:
5141
		BUG();
5472
		BUG();
5142
	}
5473
	}
5143
}
5474
}
5144
 
5475
 
5145
#define RETRY 1
5476
#define RETRY 1
5146
static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
5477
static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
5147
				      struct intel_crtc_config *pipe_config)
5478
				      struct intel_crtc_config *pipe_config)
5148
{
5479
{
5149
	struct drm_device *dev = intel_crtc->base.dev;
5480
	struct drm_device *dev = intel_crtc->base.dev;
5150
	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
5481
	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
5151
	int lane, link_bw, fdi_dotclock;
5482
	int lane, link_bw, fdi_dotclock;
5152
	bool setup_ok, needs_recompute = false;
5483
	bool setup_ok, needs_recompute = false;
5153
 
5484
 
5154
retry:
5485
retry:
5155
	/* FDI is a binary signal running at ~2.7GHz, encoding
5486
	/* FDI is a binary signal running at ~2.7GHz, encoding
5156
	 * each output octet as 10 bits. The actual frequency
5487
	 * each output octet as 10 bits. The actual frequency
5157
	 * is stored as a divider into a 100MHz clock, and the
5488
	 * is stored as a divider into a 100MHz clock, and the
5158
	 * mode pixel clock is stored in units of 1KHz.
5489
	 * mode pixel clock is stored in units of 1KHz.
5159
	 * Hence the bw of each lane in terms of the mode signal
5490
	 * Hence the bw of each lane in terms of the mode signal
5160
	 * is:
5491
	 * is:
5161
	 */
5492
	 */
5162
	link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
5493
	link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
5163
 
5494
 
5164
	fdi_dotclock = adjusted_mode->crtc_clock;
5495
	fdi_dotclock = adjusted_mode->crtc_clock;
5165
 
5496
 
5166
	lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
5497
	lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
5167
					   pipe_config->pipe_bpp);
5498
					   pipe_config->pipe_bpp);
5168
 
5499
 
5169
	pipe_config->fdi_lanes = lane;
5500
	pipe_config->fdi_lanes = lane;
5170
 
5501
 
5171
	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
5502
	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
5172
			       link_bw, &pipe_config->fdi_m_n);
5503
			       link_bw, &pipe_config->fdi_m_n);
5173
 
5504
 
5174
	setup_ok = ironlake_check_fdi_lanes(intel_crtc->base.dev,
5505
	setup_ok = ironlake_check_fdi_lanes(intel_crtc->base.dev,
5175
					    intel_crtc->pipe, pipe_config);
5506
					    intel_crtc->pipe, pipe_config);
5176
	if (!setup_ok && pipe_config->pipe_bpp > 6*3) {
5507
	if (!setup_ok && pipe_config->pipe_bpp > 6*3) {
5177
		pipe_config->pipe_bpp -= 2*3;
5508
		pipe_config->pipe_bpp -= 2*3;
5178
		DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
5509
		DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
5179
			      pipe_config->pipe_bpp);
5510
			      pipe_config->pipe_bpp);
5180
		needs_recompute = true;
5511
		needs_recompute = true;
5181
		pipe_config->bw_constrained = true;
5512
		pipe_config->bw_constrained = true;
5182
 
5513
 
5183
		goto retry;
5514
		goto retry;
5184
	}
5515
	}
5185
 
5516
 
5186
	if (needs_recompute)
5517
	if (needs_recompute)
5187
		return RETRY;
5518
		return RETRY;
5188
 
5519
 
5189
	return setup_ok ? 0 : -EINVAL;
5520
	return setup_ok ? 0 : -EINVAL;
5190
}
5521
}
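/*
 * A stand-alone illustration (not driver code): with the usual FDI PLL
 * setting intel_fdi_link_freq() evaluates to 27, so link_bw above becomes
 * 27 * 100 MHz / 1 kHz / 10 = 270000, i.e. roughly 270 MHz of pixel-equivalent
 * bandwidth per lane after 8b/10b coding.  The lane count then follows the
 * arithmetic of ironlake_get_lanes_required(); the ~5% spread-spectrum margin
 * and round-up division used here are assumptions about that helper, which is
 * defined elsewhere in this file.  Hypothetical names throughout:
 */
#include <stdio.h>

#define EXAMPLE_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static int example_fdi_lanes(unsigned int dotclock_khz,
			     unsigned int link_bw_khz, unsigned int bpp)
{
	/* ~5% margin for spread spectrum, then bits -> per-lane bytes */
	unsigned int bps = dotclock_khz * bpp * 21 / 20;

	return EXAMPLE_DIV_ROUND_UP(bps, link_bw_khz * 8);
}

int main(void)
{
	/* 1920x1080@60 (148.5 MHz) at 24 bpp fits in 2 FDI lanes */
	printf("%d\n", example_fdi_lanes(148500, 270000, 24));
	return 0;
}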
5191
 
5522
 
5192
static void hsw_compute_ips_config(struct intel_crtc *crtc,
5523
static void hsw_compute_ips_config(struct intel_crtc *crtc,
5193
				   struct intel_crtc_config *pipe_config)
5524
				   struct intel_crtc_config *pipe_config)
5194
{
5525
{
5195
	pipe_config->ips_enabled = i915.enable_ips &&
5526
	pipe_config->ips_enabled = i915.enable_ips &&
5196
				   hsw_crtc_supports_ips(crtc) &&
5527
				   hsw_crtc_supports_ips(crtc) &&
5197
				   pipe_config->pipe_bpp <= 24;
5528
				   pipe_config->pipe_bpp <= 24;
5198
}
5529
}
5199
 
5530
 
5200
static int intel_crtc_compute_config(struct intel_crtc *crtc,
5531
static int intel_crtc_compute_config(struct intel_crtc *crtc,
5201
				     struct intel_crtc_config *pipe_config)
5532
				     struct intel_crtc_config *pipe_config)
5202
{
5533
{
5203
	struct drm_device *dev = crtc->base.dev;
5534
	struct drm_device *dev = crtc->base.dev;
-
 
5535
	struct drm_i915_private *dev_priv = dev->dev_private;
5204
	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
5536
	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
5205
 
5537
 
5206
	/* FIXME should check pixel clock limits on all platforms */
5538
	/* FIXME should check pixel clock limits on all platforms */
5207
	if (INTEL_INFO(dev)->gen < 4) {
5539
	if (INTEL_INFO(dev)->gen < 4) {
5208
		struct drm_i915_private *dev_priv = dev->dev_private;
-
 
5209
		int clock_limit =
5540
		int clock_limit =
5210
			dev_priv->display.get_display_clock_speed(dev);
5541
			dev_priv->display.get_display_clock_speed(dev);
5211
 
5542
 
5212
		/*
5543
		/*
5213
		 * Enable pixel doubling when the dot clock
5544
		 * Enable pixel doubling when the dot clock
5214
		 * is > 90% of the (display) core speed.
5545
		 * is > 90% of the (display) core speed.
5215
		 *
5546
		 *
5216
		 * GDG double wide on either pipe,
5547
		 * GDG double wide on either pipe,
5217
		 * otherwise pipe A only.
5548
		 * otherwise pipe A only.
5218
		 */
5549
		 */
5219
		if ((crtc->pipe == PIPE_A || IS_I915G(dev)) &&
5550
		if ((crtc->pipe == PIPE_A || IS_I915G(dev)) &&
5220
		    adjusted_mode->crtc_clock > clock_limit * 9 / 10) {
5551
		    adjusted_mode->crtc_clock > clock_limit * 9 / 10) {
5221
			clock_limit *= 2;
5552
			clock_limit *= 2;
5222
			pipe_config->double_wide = true;
5553
			pipe_config->double_wide = true;
5223
		}
5554
		}
5224
 
5555
 
5225
		if (adjusted_mode->crtc_clock > clock_limit * 9 / 10)
5556
		if (adjusted_mode->crtc_clock > clock_limit * 9 / 10)
5226
			return -EINVAL;
5557
			return -EINVAL;
5227
	}
5558
	}
5228
 
5559
 
5229
	/*
5560
	/*
5230
	 * Pipe horizontal size must be even in:
5561
	 * Pipe horizontal size must be even in:
5231
	 * - DVO ganged mode
5562
	 * - DVO ganged mode
5232
	 * - LVDS dual channel mode
5563
	 * - LVDS dual channel mode
5233
	 * - Double wide pipe
5564
	 * - Double wide pipe
5234
	 */
5565
	 */
5235
	if ((intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
5566
	if ((intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
5236
	     intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
5567
	     intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
5237
		pipe_config->pipe_src_w &= ~1;
5568
		pipe_config->pipe_src_w &= ~1;
5238
 
5569
 
5239
	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
5570
	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
5240
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
5571
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
5241
	 */
5572
	 */
5242
	if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
5573
	if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
5243
		adjusted_mode->hsync_start == adjusted_mode->hdisplay)
5574
		adjusted_mode->hsync_start == adjusted_mode->hdisplay)
5244
		return -EINVAL;
5575
		return -EINVAL;
5245
 
5576
 
5246
	if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) && pipe_config->pipe_bpp > 10*3) {
5577
	if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) && pipe_config->pipe_bpp > 10*3) {
5247
		pipe_config->pipe_bpp = 10*3; /* 12bpc is gen5+ */
5578
		pipe_config->pipe_bpp = 10*3; /* 12bpc is gen5+ */
5248
	} else if (INTEL_INFO(dev)->gen <= 4 && pipe_config->pipe_bpp > 8*3) {
5579
	} else if (INTEL_INFO(dev)->gen <= 4 && pipe_config->pipe_bpp > 8*3) {
5249
		/* only a 8bpc pipe, with 6bpc dither through the panel fitter
5580
		/* only a 8bpc pipe, with 6bpc dither through the panel fitter
5250
		 * for lvds. */
5581
		 * for lvds. */
5251
		pipe_config->pipe_bpp = 8*3;
5582
		pipe_config->pipe_bpp = 8*3;
5252
	}
5583
	}
5253
 
5584
 
5254
	if (HAS_IPS(dev))
5585
	if (HAS_IPS(dev))
5255
		hsw_compute_ips_config(crtc, pipe_config);
5586
		hsw_compute_ips_config(crtc, pipe_config);
5256
 
-
 
5257
	/*
-
 
5258
	 * XXX: PCH/WRPLL clock sharing is done in ->mode_set, so make sure the
-
 
5259
	 * old clock survives for now.
-
 
5260
	 */
-
 
5261
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev) || HAS_DDI(dev))
-
 
5262
		pipe_config->shared_dpll = crtc->config.shared_dpll;
-
 
5263
 
5587
 
5264
	if (pipe_config->has_pch_encoder)
5588
	if (pipe_config->has_pch_encoder)
5265
		return ironlake_fdi_compute_config(crtc, pipe_config);
5589
		return ironlake_fdi_compute_config(crtc, pipe_config);
5266
 
5590
 
5267
	return 0;
5591
	return 0;
5268
}
5592
}
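/*
 * A stand-alone illustration (not driver code): on gen2/gen3 the block above
 * doubles the effective pixel clock limit ("double wide" pipe) once the dot
 * clock exceeds 90% of the core display clock, and rejects the mode if it
 * still does not fit.  Sketch of that decision with hypothetical names:
 */
#include <stdbool.h>

static int example_check_gen3_dotclock(int dotclock_khz, int cdclk_khz,
				       bool pipe_a_or_i915g, bool *double_wide)
{
	int clock_limit = cdclk_khz;

	*double_wide = false;
	if (pipe_a_or_i915g && dotclock_khz > clock_limit * 9 / 10) {
		clock_limit *= 2;
		*double_wide = true;
	}

	/* mirrors the -EINVAL rejection above */
	return dotclock_khz > clock_limit * 9 / 10 ? -1 : 0;
}
/* e.g. a 320 MHz dot clock against a 333 MHz core clock exceeds the 90%
 * limit (299.7 MHz), so the pipe goes double wide and then passes against
 * the doubled limit (599.4 MHz). */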
5269
 
5593
 
5270
static int valleyview_get_display_clock_speed(struct drm_device *dev)
5594
static int valleyview_get_display_clock_speed(struct drm_device *dev)
5271
{
5595
{
5272
	struct drm_i915_private *dev_priv = dev->dev_private;
5596
	struct drm_i915_private *dev_priv = dev->dev_private;
5273
	int vco = valleyview_get_vco(dev_priv);
-
 
5274
	u32 val;
5597
	u32 val;
5275
	int divider;
5598
	int divider;
-
 
5599
 
-
 
5600
	/* FIXME: Punit isn't quite ready yet */
-
 
5601
	if (IS_CHERRYVIEW(dev))
-
 
5602
		return 400000;
-
 
5603
 
-
 
5604
	if (dev_priv->hpll_freq == 0)
-
 
5605
		dev_priv->hpll_freq = valleyview_get_vco(dev_priv);
5276
 
5606
 
5277
	mutex_lock(&dev_priv->dpio_lock);
5607
	mutex_lock(&dev_priv->dpio_lock);
5278
	val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
5608
	val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
5279
	mutex_unlock(&dev_priv->dpio_lock);
5609
	mutex_unlock(&dev_priv->dpio_lock);
5280
 
5610
 
5281
	divider = val & DISPLAY_FREQUENCY_VALUES;
5611
	divider = val & DISPLAY_FREQUENCY_VALUES;
5282
 
5612
 
5283
	WARN((val & DISPLAY_FREQUENCY_STATUS) !=
5613
	WARN((val & DISPLAY_FREQUENCY_STATUS) !=
5284
	     (divider << DISPLAY_FREQUENCY_STATUS_SHIFT),
5614
	     (divider << DISPLAY_FREQUENCY_STATUS_SHIFT),
5285
	     "cdclk change in progress\n");
5615
	     "cdclk change in progress\n");
5286
 
5616
 
5287
	return DIV_ROUND_CLOSEST(vco << 1, divider + 1);
5617
	return DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, divider + 1);
5288
}
5618
}
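/*
 * A stand-alone illustration (not driver code): the return statement above is
 * cdclk = 2 * vco / (divider + 1), rounded to the nearest kHz, with the
 * divider taken from the CCK display clock control register.  Arithmetic
 * only; the register layout is not modelled here:
 */
static int example_vlv_cdclk(int vco_khz, int divider)
{
	/* DIV_ROUND_CLOSEST() for positive operands */
	return (2 * vco_khz + (divider + 1) / 2) / (divider + 1);
}
/* e.g. example_vlv_cdclk(1600000, 9) == 320000 kHz (illustrative values). */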
5289
 
5619
 
5290
static int i945_get_display_clock_speed(struct drm_device *dev)
5620
static int i945_get_display_clock_speed(struct drm_device *dev)
5291
{
5621
{
5292
	return 400000;
5622
	return 400000;
5293
}
5623
}
5294
 
5624
 
5295
static int i915_get_display_clock_speed(struct drm_device *dev)
5625
static int i915_get_display_clock_speed(struct drm_device *dev)
5296
{
5626
{
5297
	return 333000;
5627
	return 333000;
5298
}
5628
}
5299
 
5629
 
5300
static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
5630
static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
5301
{
5631
{
5302
	return 200000;
5632
	return 200000;
5303
}
5633
}
5304
 
5634
 
5305
static int pnv_get_display_clock_speed(struct drm_device *dev)
5635
static int pnv_get_display_clock_speed(struct drm_device *dev)
5306
{
5636
{
5307
	u16 gcfgc = 0;
5637
	u16 gcfgc = 0;
5308
 
5638
 
5309
	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
5639
	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
5310
 
5640
 
5311
	switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
5641
	switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
5312
	case GC_DISPLAY_CLOCK_267_MHZ_PNV:
5642
	case GC_DISPLAY_CLOCK_267_MHZ_PNV:
5313
		return 267000;
5643
		return 267000;
5314
	case GC_DISPLAY_CLOCK_333_MHZ_PNV:
5644
	case GC_DISPLAY_CLOCK_333_MHZ_PNV:
5315
		return 333000;
5645
		return 333000;
5316
	case GC_DISPLAY_CLOCK_444_MHZ_PNV:
5646
	case GC_DISPLAY_CLOCK_444_MHZ_PNV:
5317
		return 444000;
5647
		return 444000;
5318
	case GC_DISPLAY_CLOCK_200_MHZ_PNV:
5648
	case GC_DISPLAY_CLOCK_200_MHZ_PNV:
5319
		return 200000;
5649
		return 200000;
5320
	default:
5650
	default:
5321
		DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
5651
		DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
5322
	case GC_DISPLAY_CLOCK_133_MHZ_PNV:
5652
	case GC_DISPLAY_CLOCK_133_MHZ_PNV:
5323
		return 133000;
5653
		return 133000;
5324
	case GC_DISPLAY_CLOCK_167_MHZ_PNV:
5654
	case GC_DISPLAY_CLOCK_167_MHZ_PNV:
5325
		return 167000;
5655
		return 167000;
5326
	}
5656
	}
5327
}
5657
}
5328
 
5658
 
5329
static int i915gm_get_display_clock_speed(struct drm_device *dev)
5659
static int i915gm_get_display_clock_speed(struct drm_device *dev)
5330
{
5660
{
5331
	u16 gcfgc = 0;
5661
	u16 gcfgc = 0;
5332
 
5662
 
5333
	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
5663
	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
5334
 
5664
 
5335
	if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
5665
	if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
5336
		return 133000;
5666
		return 133000;
5337
	else {
5667
	else {
5338
		switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
5668
		switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
5339
		case GC_DISPLAY_CLOCK_333_MHZ:
5669
		case GC_DISPLAY_CLOCK_333_MHZ:
5340
			return 333000;
5670
			return 333000;
5341
		default:
5671
		default:
5342
		case GC_DISPLAY_CLOCK_190_200_MHZ:
5672
		case GC_DISPLAY_CLOCK_190_200_MHZ:
5343
			return 190000;
5673
			return 190000;
5344
		}
5674
		}
5345
	}
5675
	}
5346
}
5676
}
5347
 
5677
 
5348
static int i865_get_display_clock_speed(struct drm_device *dev)
5678
static int i865_get_display_clock_speed(struct drm_device *dev)
5349
{
5679
{
5350
	return 266000;
5680
	return 266000;
5351
}
5681
}
5352
 
5682
 
5353
static int i855_get_display_clock_speed(struct drm_device *dev)
5683
static int i855_get_display_clock_speed(struct drm_device *dev)
5354
{
5684
{
5355
	u16 hpllcc = 0;
5685
	u16 hpllcc = 0;
5356
	/* Assume that the hardware is in the high speed state.  This
5686
	/* Assume that the hardware is in the high speed state.  This
5357
	 * should be the default.
5687
	 * should be the default.
5358
	 */
5688
	 */
5359
	switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
5689
	switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
5360
	case GC_CLOCK_133_200:
5690
	case GC_CLOCK_133_200:
5361
	case GC_CLOCK_100_200:
5691
	case GC_CLOCK_100_200:
5362
		return 200000;
5692
		return 200000;
5363
	case GC_CLOCK_166_250:
5693
	case GC_CLOCK_166_250:
5364
		return 250000;
5694
		return 250000;
5365
	case GC_CLOCK_100_133:
5695
	case GC_CLOCK_100_133:
5366
		return 133000;
5696
		return 133000;
5367
	}
5697
	}
5368
 
5698
 
5369
	/* Shouldn't happen */
5699
	/* Shouldn't happen */
5370
	return 0;
5700
	return 0;
5371
}
5701
}
5372
 
5702
 
5373
static int i830_get_display_clock_speed(struct drm_device *dev)
5703
static int i830_get_display_clock_speed(struct drm_device *dev)
5374
{
5704
{
5375
	return 133000;
5705
	return 133000;
5376
}
5706
}
5377
 
5707
 
5378
static void
5708
static void
5379
intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
5709
intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
5380
{
5710
{
5381
	while (*num > DATA_LINK_M_N_MASK ||
5711
	while (*num > DATA_LINK_M_N_MASK ||
5382
	       *den > DATA_LINK_M_N_MASK) {
5712
	       *den > DATA_LINK_M_N_MASK) {
5383
		*num >>= 1;
5713
		*num >>= 1;
5384
		*den >>= 1;
5714
		*den >>= 1;
5385
	}
5715
	}
5386
}
5716
}
5387
 
5717
 
5388
static void compute_m_n(unsigned int m, unsigned int n,
5718
static void compute_m_n(unsigned int m, unsigned int n,
5389
			uint32_t *ret_m, uint32_t *ret_n)
5719
			uint32_t *ret_m, uint32_t *ret_n)
5390
{
5720
{
5391
	*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
5721
	*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
5392
	*ret_m = div_u64((uint64_t) m * *ret_n, n);
5722
	*ret_m = div_u64((uint64_t) m * *ret_n, n);
5393
	intel_reduce_m_n_ratio(ret_m, ret_n);
5723
	intel_reduce_m_n_ratio(ret_m, ret_n);
5394
}
5724
}
5395
 
5725
 
5396
void
5726
void
5397
intel_link_compute_m_n(int bits_per_pixel, int nlanes,
5727
intel_link_compute_m_n(int bits_per_pixel, int nlanes,
5398
		       int pixel_clock, int link_clock,
5728
		       int pixel_clock, int link_clock,
5399
		       struct intel_link_m_n *m_n)
5729
		       struct intel_link_m_n *m_n)
5400
{
5730
{
5401
	m_n->tu = 64;
5731
	m_n->tu = 64;
5402
 
5732
 
5403
	compute_m_n(bits_per_pixel * pixel_clock,
5733
	compute_m_n(bits_per_pixel * pixel_clock,
5404
		    link_clock * nlanes * 8,
5734
		    link_clock * nlanes * 8,
5405
		    &m_n->gmch_m, &m_n->gmch_n);
5735
		    &m_n->gmch_m, &m_n->gmch_n);
5406
 
5736
 
5407
	compute_m_n(pixel_clock, link_clock,
5737
	compute_m_n(pixel_clock, link_clock,
5408
		    &m_n->link_m, &m_n->link_n);
5738
		    &m_n->link_m, &m_n->link_n);
5409
}
5739
}
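/*
 * A stand-alone illustration (not driver code): compute_m_n() above picks N
 * as a power of two (capped at DATA_LINK_N_MAX), scales M to preserve the
 * ratio, then shifts both down until they fit the 24-bit register fields.
 * The constants 0x800000 and 0xffffff below are assumptions matching
 * DATA_LINK_N_MAX and DATA_LINK_M_N_MASK.  Hypothetical names:
 */
#include <stdint.h>

static uint32_t example_roundup_pow_of_two(uint32_t v)
{
	uint32_t p = 1;

	while (p < v)
		p <<= 1;
	return p;
}

static void example_compute_m_n(uint32_t m, uint32_t n,
				uint32_t *ret_m, uint32_t *ret_n)
{
	*ret_n = example_roundup_pow_of_two(n);
	if (*ret_n > 0x800000)
		*ret_n = 0x800000;
	*ret_m = (uint32_t)(((uint64_t)m * *ret_n) / n);
	while (*ret_m > 0xffffff || *ret_n > 0xffffff) {
		*ret_m >>= 1;
		*ret_n >>= 1;
	}
}
/*
 * e.g. 1920x1080@60 (148.5 MHz) at 24 bpp over two 270 MHz DP lanes:
 *   data M : N = 24 * 148500 : 270000 * 2 * 8   (ratio 0.825)
 *   link M : N = 148500 : 270000                (ratio 0.55)
 */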
5410
 
5740
 
5411
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
5741
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
5412
{
5742
{
5413
	if (i915.panel_use_ssc >= 0)
5743
	if (i915.panel_use_ssc >= 0)
5414
		return i915.panel_use_ssc != 0;
5744
		return i915.panel_use_ssc != 0;
5415
	return dev_priv->vbt.lvds_use_ssc
5745
	return dev_priv->vbt.lvds_use_ssc
5416
		&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
5746
		&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
5417
}
5747
}
5418
 
5748
 
5419
static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
5749
static int i9xx_get_refclk(struct intel_crtc *crtc, int num_connectors)
5420
{
5750
{
5421
	struct drm_device *dev = crtc->dev;
5751
	struct drm_device *dev = crtc->base.dev;
5422
	struct drm_i915_private *dev_priv = dev->dev_private;
5752
	struct drm_i915_private *dev_priv = dev->dev_private;
5423
	int refclk;
5753
	int refclk;
5424
 
5754
 
5425
	if (IS_VALLEYVIEW(dev)) {
5755
	if (IS_VALLEYVIEW(dev)) {
5426
		refclk = 100000;
5756
		refclk = 100000;
5427
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
5757
	} else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
5428
	    intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
5758
	    intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
5429
		refclk = dev_priv->vbt.lvds_ssc_freq;
5759
		refclk = dev_priv->vbt.lvds_ssc_freq;
5430
		DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
5760
		DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
5431
	} else if (!IS_GEN2(dev)) {
5761
	} else if (!IS_GEN2(dev)) {
5432
		refclk = 96000;
5762
		refclk = 96000;
5433
	} else {
5763
	} else {
5434
		refclk = 48000;
5764
		refclk = 48000;
5435
	}
5765
	}
5436
 
5766
 
5437
	return refclk;
5767
	return refclk;
5438
}
5768
}
5439
 
5769
 
5440
static uint32_t pnv_dpll_compute_fp(struct dpll *dpll)
5770
static uint32_t pnv_dpll_compute_fp(struct dpll *dpll)
5441
{
5771
{
5442
	return (1 << dpll->n) << 16 | dpll->m2;
5772
	return (1 << dpll->n) << 16 | dpll->m2;
5443
}
5773
}
5444
 
5774
 
5445
static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
5775
static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
5446
{
5776
{
5447
	return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
5777
	return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
5448
}
5778
}
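/*
 * A short note on the two helpers above (illustration only): they pack the
 * feedback dividers into the FP register layout.  i9xx keeps N, M1 and M2 in
 * separate byte lanes, while Pineview stores N one-hot and has no M1.
 * e.g. for n = 4, m1 = 18, m2 = 7:
 *   i9xx_dpll_compute_fp() -> 4 << 16 | 18 << 8 | 7 = 0x00041207
 *   pnv_dpll_compute_fp()  -> (1 << 4) << 16 | 7    = 0x00100007
 */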
5449
 
5779
 
5450
static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
5780
static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
5451
				     intel_clock_t *reduced_clock)
5781
				     intel_clock_t *reduced_clock)
5452
{
5782
{
5453
	struct drm_device *dev = crtc->base.dev;
5783
	struct drm_device *dev = crtc->base.dev;
5454
	u32 fp, fp2 = 0;
5784
	u32 fp, fp2 = 0;
5455
 
5785
 
5456
	if (IS_PINEVIEW(dev)) {
5786
	if (IS_PINEVIEW(dev)) {
5457
		fp = pnv_dpll_compute_fp(&crtc->config.dpll);
5787
		fp = pnv_dpll_compute_fp(&crtc->new_config->dpll);
5458
		if (reduced_clock)
5788
		if (reduced_clock)
5459
			fp2 = pnv_dpll_compute_fp(reduced_clock);
5789
			fp2 = pnv_dpll_compute_fp(reduced_clock);
5460
	} else {
5790
	} else {
5461
		fp = i9xx_dpll_compute_fp(&crtc->config.dpll);
5791
		fp = i9xx_dpll_compute_fp(&crtc->new_config->dpll);
5462
		if (reduced_clock)
5792
		if (reduced_clock)
5463
			fp2 = i9xx_dpll_compute_fp(reduced_clock);
5793
			fp2 = i9xx_dpll_compute_fp(reduced_clock);
5464
	}
5794
	}
5465
 
5795
 
5466
	crtc->config.dpll_hw_state.fp0 = fp;
5796
	crtc->new_config->dpll_hw_state.fp0 = fp;
5467
 
5797
 
5468
	crtc->lowfreq_avail = false;
5798
	crtc->lowfreq_avail = false;
5469
	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
5799
	if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
5470
	    reduced_clock && i915.powersave) {
5800
	    reduced_clock && i915.powersave) {
5471
		crtc->config.dpll_hw_state.fp1 = fp2;
5801
		crtc->new_config->dpll_hw_state.fp1 = fp2;
5472
		crtc->lowfreq_avail = true;
5802
		crtc->lowfreq_avail = true;
5473
	} else {
5803
	} else {
5474
		crtc->config.dpll_hw_state.fp1 = fp;
5804
		crtc->new_config->dpll_hw_state.fp1 = fp;
5475
	}
5805
	}
5476
}
5806
}
5477
 
5807
 
5478
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
5808
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
5479
		pipe)
5809
		pipe)
5480
{
5810
{
5481
	u32 reg_val;
5811
	u32 reg_val;
5482
 
5812
 
5483
	/*
5813
	/*
5484
	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
5814
	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
5485
	 * and set it to a reasonable value instead.
5815
	 * and set it to a reasonable value instead.
5486
	 */
5816
	 */
5487
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
5817
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
5488
	reg_val &= 0xffffff00;
5818
	reg_val &= 0xffffff00;
5489
	reg_val |= 0x00000030;
5819
	reg_val |= 0x00000030;
5490
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
5820
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
5491
 
5821
 
5492
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
5822
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
5493
	reg_val &= 0x8cffffff;
5823
	reg_val &= 0x8cffffff;
5494
	reg_val = 0x8c000000;
5824
	reg_val = 0x8c000000;
5495
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
5825
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
5496
 
5826
 
5497
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
5827
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
5498
	reg_val &= 0xffffff00;
5828
	reg_val &= 0xffffff00;
5499
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
5829
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
5500
 
5830
 
5501
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
5831
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
5502
	reg_val &= 0x00ffffff;
5832
	reg_val &= 0x00ffffff;
5503
	reg_val |= 0xb0000000;
5833
	reg_val |= 0xb0000000;
5504
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
5834
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
5505
}
5835
}
5506
 
5836
 
5507
static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
5837
static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
5508
					 struct intel_link_m_n *m_n)
5838
					 struct intel_link_m_n *m_n)
5509
{
5839
{
5510
	struct drm_device *dev = crtc->base.dev;
5840
	struct drm_device *dev = crtc->base.dev;
5511
	struct drm_i915_private *dev_priv = dev->dev_private;
5841
	struct drm_i915_private *dev_priv = dev->dev_private;
5512
	int pipe = crtc->pipe;
5842
	int pipe = crtc->pipe;
5513
 
5843
 
5514
	I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
5844
	I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
5515
	I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
5845
	I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
5516
	I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
5846
	I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
5517
	I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
5847
	I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
5518
}
5848
}
5519
 
5849
 
5520
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
5850
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
5521
					 struct intel_link_m_n *m_n)
5851
					 struct intel_link_m_n *m_n,
-
 
5852
					 struct intel_link_m_n *m2_n2)
5522
{
5853
{
5523
	struct drm_device *dev = crtc->base.dev;
5854
	struct drm_device *dev = crtc->base.dev;
5524
	struct drm_i915_private *dev_priv = dev->dev_private;
5855
	struct drm_i915_private *dev_priv = dev->dev_private;
5525
	int pipe = crtc->pipe;
5856
	int pipe = crtc->pipe;
5526
	enum transcoder transcoder = crtc->config.cpu_transcoder;
5857
	enum transcoder transcoder = crtc->config.cpu_transcoder;
5527
 
5858
 
5528
	if (INTEL_INFO(dev)->gen >= 5) {
5859
	if (INTEL_INFO(dev)->gen >= 5) {
5529
		I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
5860
		I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
5530
		I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
5861
		I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
5531
		I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
5862
		I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
5532
		I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
5863
		I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
-
 
5864
		/* M2_N2 registers to be set only for gen < 8 (M2_N2 available
-
 
5865
		 * for gen < 8) and if DRRS is supported (to make sure the
-
 
5866
		 * registers are not unnecessarily accessed).
-
 
5867
		 */
-
 
5868
		if (m2_n2 && INTEL_INFO(dev)->gen < 8 &&
-
 
5869
			crtc->config.has_drrs) {
-
 
5870
			I915_WRITE(PIPE_DATA_M2(transcoder),
-
 
5871
					TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
-
 
5872
			I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
-
 
5873
			I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
-
 
5874
			I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
-
 
5875
		}
5533
	} else {
5876
	} else {
5534
		I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
5877
		I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
5535
		I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
5878
		I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
5536
		I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
5879
		I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
5537
		I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
5880
		I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
5538
	}
5881
	}
5539
}
5882
}
5540
 
5883
 
5541
static void intel_dp_set_m_n(struct intel_crtc *crtc)
5884
void intel_dp_set_m_n(struct intel_crtc *crtc)
5542
{
5885
{
5543
	if (crtc->config.has_pch_encoder)
5886
	if (crtc->config.has_pch_encoder)
5544
		intel_pch_transcoder_set_m_n(crtc, &crtc->config.dp_m_n);
5887
		intel_pch_transcoder_set_m_n(crtc, &crtc->config.dp_m_n);
5545
	else
5888
	else
5546
		intel_cpu_transcoder_set_m_n(crtc, &crtc->config.dp_m_n);
5889
		intel_cpu_transcoder_set_m_n(crtc, &crtc->config.dp_m_n,
-
 
5890
						   &crtc->config.dp_m2_n2);
5547
}
5891
}
5548
 
5892
 
-
 
5893
static void vlv_update_pll(struct intel_crtc *crtc,
5549
static void vlv_update_pll(struct intel_crtc *crtc)
5894
			   struct intel_crtc_config *pipe_config)
5550
{
5895
{
5551
	u32 dpll, dpll_md;
5896
	u32 dpll, dpll_md;
5552
 
5897
 
5553
	/*
5898
	/*
5554
	 * Enable DPIO clock input. We should never disable the reference
5899
	 * Enable DPIO clock input. We should never disable the reference
5555
	 * clock for pipe B, since VGA hotplug / manual detection depends
5900
	 * clock for pipe B, since VGA hotplug / manual detection depends
5556
	 * on it.
5901
	 * on it.
5557
	 */
5902
	 */
5558
	dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
5903
	dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
5559
		DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
5904
		DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
5560
	/* We should never disable this, set it here for state tracking */
5905
	/* We should never disable this, set it here for state tracking */
5561
	if (crtc->pipe == PIPE_B)
5906
	if (crtc->pipe == PIPE_B)
5562
		dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
5907
		dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
5563
	dpll |= DPLL_VCO_ENABLE;
5908
	dpll |= DPLL_VCO_ENABLE;
5564
	crtc->config.dpll_hw_state.dpll = dpll;
5909
	pipe_config->dpll_hw_state.dpll = dpll;
5565
 
5910
 
5566
	dpll_md = (crtc->config.pixel_multiplier - 1)
5911
	dpll_md = (pipe_config->pixel_multiplier - 1)
5567
		<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
5912
		<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
5568
	crtc->config.dpll_hw_state.dpll_md = dpll_md;
5913
	pipe_config->dpll_hw_state.dpll_md = dpll_md;
5569
}
5914
}
-
 
5915
 
5570
 
5916
static void vlv_prepare_pll(struct intel_crtc *crtc,
5571
static void vlv_prepare_pll(struct intel_crtc *crtc)
5917
			    const struct intel_crtc_config *pipe_config)
5572
{
5918
{
5573
	struct drm_device *dev = crtc->base.dev;
5919
	struct drm_device *dev = crtc->base.dev;
5574
	struct drm_i915_private *dev_priv = dev->dev_private;
5920
	struct drm_i915_private *dev_priv = dev->dev_private;
5575
	int pipe = crtc->pipe;
5921
	int pipe = crtc->pipe;
5576
	u32 mdiv;
5922
	u32 mdiv;
5577
	u32 bestn, bestm1, bestm2, bestp1, bestp2;
5923
	u32 bestn, bestm1, bestm2, bestp1, bestp2;
5578
	u32 coreclk, reg_val;
5924
	u32 coreclk, reg_val;
5579
 
5925
 
5580
	mutex_lock(&dev_priv->dpio_lock);
5926
	mutex_lock(&dev_priv->dpio_lock);
5581
 
5927
 
5582
	bestn = crtc->config.dpll.n;
5928
	bestn = pipe_config->dpll.n;
5583
	bestm1 = crtc->config.dpll.m1;
5929
	bestm1 = pipe_config->dpll.m1;
5584
	bestm2 = crtc->config.dpll.m2;
5930
	bestm2 = pipe_config->dpll.m2;
5585
	bestp1 = crtc->config.dpll.p1;
5931
	bestp1 = pipe_config->dpll.p1;
5586
	bestp2 = crtc->config.dpll.p2;
5932
	bestp2 = pipe_config->dpll.p2;
5587
 
5933
 
5588
	/* See eDP HDMI DPIO driver vbios notes doc */
5934
	/* See eDP HDMI DPIO driver vbios notes doc */
5589
 
5935
 
5590
	/* PLL B needs special handling */
5936
	/* PLL B needs special handling */
5591
	if (pipe == PIPE_B)
5937
	if (pipe == PIPE_B)
5592
		vlv_pllb_recal_opamp(dev_priv, pipe);
5938
		vlv_pllb_recal_opamp(dev_priv, pipe);
5593
 
5939
 
5594
	/* Set up Tx target for periodic Rcomp update */
5940
	/* Set up Tx target for periodic Rcomp update */
5595
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
5941
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
5596
 
5942
 
5597
	/* Disable target IRef on PLL */
5943
	/* Disable target IRef on PLL */
5598
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
5944
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
5599
	reg_val &= 0x00ffffff;
5945
	reg_val &= 0x00ffffff;
5600
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
5946
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
5601
 
5947
 
5602
	/* Disable fast lock */
5948
	/* Disable fast lock */
5603
	vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
5949
	vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
5604
 
5950
 
5605
	/* Set idtafcrecal before PLL is enabled */
5951
	/* Set idtafcrecal before PLL is enabled */
5606
	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
5952
	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
5607
	mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
5953
	mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
5608
	mdiv |= ((bestn << DPIO_N_SHIFT));
5954
	mdiv |= ((bestn << DPIO_N_SHIFT));
5609
	mdiv |= (1 << DPIO_K_SHIFT);
5955
	mdiv |= (1 << DPIO_K_SHIFT);
5610
 
5956
 
5611
	/*
5957
	/*
5612
	 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
5958
	 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
5613
	 * but we don't support that).
5959
	 * but we don't support that).
5614
	 * Note: don't use the DAC post divider as it seems unstable.
5960
	 * Note: don't use the DAC post divider as it seems unstable.
5615
	 */
5961
	 */
5616
	mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
5962
	mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
5617
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
5963
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
5618
 
5964
 
5619
	mdiv |= DPIO_ENABLE_CALIBRATION;
5965
	mdiv |= DPIO_ENABLE_CALIBRATION;
5620
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
5966
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
5621
 
5967
 
5622
	/* Set HBR and RBR LPF coefficients */
5968
	/* Set HBR and RBR LPF coefficients */
5623
	if (crtc->config.port_clock == 162000 ||
5969
	if (pipe_config->port_clock == 162000 ||
5624
	    intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_ANALOG) ||
5970
	    intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG) ||
5625
	    intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI))
5971
	    intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
5626
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
5972
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
5627
				 0x009f0003);
5973
				 0x009f0003);
5628
	else
5974
	else
5629
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
5975
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
5630
				 0x00d0000f);
5976
				 0x00d0000f);
5631
 
5977
 
5632
	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP) ||
-
 
5633
	    intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) {
5978
	if (crtc->config.has_dp_encoder) {
5634
		/* Use SSC source */
5979
		/* Use SSC source */
5635
		if (pipe == PIPE_A)
5980
		if (pipe == PIPE_A)
5636
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
5981
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
5637
					 0x0df40000);
5982
					 0x0df40000);
5638
		else
5983
		else
5639
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
5984
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
5640
					 0x0df70000);
5985
					 0x0df70000);
5641
	} else { /* HDMI or VGA */
5986
	} else { /* HDMI or VGA */
5642
		/* Use bend source */
5987
		/* Use bend source */
5643
		if (pipe == PIPE_A)
5988
		if (pipe == PIPE_A)
5644
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
5989
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
5645
					 0x0df70000);
5990
					 0x0df70000);
5646
		else
5991
		else
5647
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
5992
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
5648
					 0x0df40000);
5993
					 0x0df40000);
5649
	}
5994
	}
5650
 
5995
 
5651
	coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
5996
	coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
5652
	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
5997
	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
5653
	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT) ||
5998
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
5654
	    intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP))
5999
	    intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
5655
		coreclk |= 0x01000000;
6000
		coreclk |= 0x01000000;
5656
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
6001
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
5657
 
6002
 
5658
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
6003
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
5659
	mutex_unlock(&dev_priv->dpio_lock);
6004
	mutex_unlock(&dev_priv->dpio_lock);
5660
}
6005
}
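/*
 * A stand-alone illustration (not driver code): the divider set written to
 * the DPIO PLL registers above determines the port clock roughly as
 * vco = refclk * m1 * m2 / n and dot = vco / (p1 * p2).  The exact read-back
 * formula lives elsewhere in this file, so treat this as an assumption for
 * illustration only, with hypothetical names:
 */
static int example_vlv_dot_clock(int refclk_khz, int n, int m1, int m2,
				 int p1, int p2)
{
	long vco = (long)refclk_khz * m1 * m2 / n;

	return (int)(vco / (p1 * p2));
}
/* e.g. example_vlv_dot_clock(100000, 1, 2, 27, 2, 10) == 270000 kHz
 * (illustrative dividers, not necessarily a legal configuration). */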
5661
 
6006
 
-
 
6007
static void chv_update_pll(struct intel_crtc *crtc,
-
 
6008
			   struct intel_crtc_config *pipe_config)
-
 
6009
{
-
 
6010
	pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLOCK_CHV |
-
 
6011
		DPLL_REFA_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS |
-
 
6012
		DPLL_VCO_ENABLE;
-
 
6013
	if (crtc->pipe != PIPE_A)
-
 
6014
		pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
-
 
6015
 
-
 
6016
	pipe_config->dpll_hw_state.dpll_md =
-
 
6017
		(pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
-
 
6018
}
-
 
6019
 
-
 
6020
static void chv_prepare_pll(struct intel_crtc *crtc,
5662
static void chv_update_pll(struct intel_crtc *crtc)
6021
			    const struct intel_crtc_config *pipe_config)
5663
{
6022
{
5664
	struct drm_device *dev = crtc->base.dev;
6023
	struct drm_device *dev = crtc->base.dev;
5665
	struct drm_i915_private *dev_priv = dev->dev_private;
6024
	struct drm_i915_private *dev_priv = dev->dev_private;
5666
	int pipe = crtc->pipe;
6025
	int pipe = crtc->pipe;
5667
	int dpll_reg = DPLL(crtc->pipe);
6026
	int dpll_reg = DPLL(crtc->pipe);
5668
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
6027
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
5669
	u32 loopfilter, intcoeff;
6028
	u32 loopfilter, intcoeff;
5670
	u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
6029
	u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
5671
	int refclk;
6030
	int refclk;
5672
 
-
 
5673
	crtc->config.dpll_hw_state.dpll = DPLL_SSC_REF_CLOCK_CHV |
-
 
5674
		DPLL_REFA_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS |
-
 
5675
		DPLL_VCO_ENABLE;
-
 
5676
	if (pipe != PIPE_A)
-
 
5677
		crtc->config.dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
-
 
5678
 
-
 
5679
	crtc->config.dpll_hw_state.dpll_md =
-
 
5680
		(crtc->config.pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
-
 
5681
 
6031
 
5682
	bestn = crtc->config.dpll.n;
6032
	bestn = pipe_config->dpll.n;
5683
	bestm2_frac = crtc->config.dpll.m2 & 0x3fffff;
6033
	bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
5684
	bestm1 = crtc->config.dpll.m1;
6034
	bestm1 = pipe_config->dpll.m1;
5685
	bestm2 = crtc->config.dpll.m2 >> 22;
6035
	bestm2 = pipe_config->dpll.m2 >> 22;
5686
	bestp1 = crtc->config.dpll.p1;
6036
	bestp1 = pipe_config->dpll.p1;
5687
	bestp2 = crtc->config.dpll.p2;
6037
	bestp2 = pipe_config->dpll.p2;
5688
 
6038
 
5689
	/*
6039
	/*
5690
	 * Enable Refclk and SSC
6040
	 * Enable Refclk and SSC
5691
	 */
6041
	 */
5692
	I915_WRITE(dpll_reg,
6042
	I915_WRITE(dpll_reg,
5693
		   crtc->config.dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
6043
		   pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
5694
 
6044
 
5695
	mutex_lock(&dev_priv->dpio_lock);
6045
	mutex_lock(&dev_priv->dpio_lock);
5696
 
6046
 
5697
	/* p1 and p2 divider */
6047
	/* p1 and p2 divider */
5698
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
6048
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
5699
			5 << DPIO_CHV_S1_DIV_SHIFT |
6049
			5 << DPIO_CHV_S1_DIV_SHIFT |
5700
			bestp1 << DPIO_CHV_P1_DIV_SHIFT |
6050
			bestp1 << DPIO_CHV_P1_DIV_SHIFT |
5701
			bestp2 << DPIO_CHV_P2_DIV_SHIFT |
6051
			bestp2 << DPIO_CHV_P2_DIV_SHIFT |
5702
			1 << DPIO_CHV_K_DIV_SHIFT);
6052
			1 << DPIO_CHV_K_DIV_SHIFT);
5703
 
6053
 
5704
	/* Feedback post-divider - m2 */
6054
	/* Feedback post-divider - m2 */
5705
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);
6055
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);
5706
 
6056
 
5707
	/* Feedback refclk divider - n and m1 */
6057
	/* Feedback refclk divider - n and m1 */
5708
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
6058
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
5709
			DPIO_CHV_M1_DIV_BY_2 |
6059
			DPIO_CHV_M1_DIV_BY_2 |
5710
			1 << DPIO_CHV_N_DIV_SHIFT);
6060
			1 << DPIO_CHV_N_DIV_SHIFT);
5711
 
6061
 
5712
	/* M2 fraction division */
6062
	/* M2 fraction division */
5713
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
6063
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
5714
 
6064
 
5715
	/* M2 fraction division enable */
6065
	/* M2 fraction division enable */
5716
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port),
6066
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port),
5717
		       DPIO_CHV_FRAC_DIV_EN |
6067
		       DPIO_CHV_FRAC_DIV_EN |
5718
		       (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT));
6068
		       (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT));
5719
 
6069
 
5720
	/* Loop filter */
6070
	/* Loop filter */
5721
	refclk = i9xx_get_refclk(&crtc->base, 0);
6071
	refclk = i9xx_get_refclk(crtc, 0);
5722
	loopfilter = 5 << DPIO_CHV_PROP_COEFF_SHIFT |
6072
	loopfilter = 5 << DPIO_CHV_PROP_COEFF_SHIFT |
5723
		2 << DPIO_CHV_GAIN_CTRL_SHIFT;
6073
		2 << DPIO_CHV_GAIN_CTRL_SHIFT;
5724
	if (refclk == 100000)
6074
	if (refclk == 100000)
5725
		intcoeff = 11;
6075
		intcoeff = 11;
5726
	else if (refclk == 38400)
6076
	else if (refclk == 38400)
5727
		intcoeff = 10;
6077
		intcoeff = 10;
5728
	else
6078
	else
5729
		intcoeff = 9;
6079
		intcoeff = 9;
5730
	loopfilter |= intcoeff << DPIO_CHV_INT_COEFF_SHIFT;
6080
	loopfilter |= intcoeff << DPIO_CHV_INT_COEFF_SHIFT;
5731
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);
6081
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);
5732
 
6082
 
5733
	/* AFC Recal */
6083
	/* AFC Recal */
5734
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
6084
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
5735
			vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
6085
			vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
5736
			DPIO_AFC_RECAL);
6086
			DPIO_AFC_RECAL);
5737
 
6087
 
5738
	mutex_unlock(&dev_priv->dpio_lock);
6088
	mutex_unlock(&dev_priv->dpio_lock);
5739
}
6089
}
-
 
6090
 
-
 
6091
/**
-
 
6092
 * vlv_force_pll_on - forcibly enable just the PLL
-
 
6093
 * @dev_priv: i915 private structure
-
 
6094
 * @pipe: pipe PLL to enable
-
 
6095
 * @dpll: PLL configuration
-
 
6096
 *
-
 
6097
 * Enable the PLL for @pipe using the supplied @dpll config. To be used
-
 
6098
 * in cases where we need the PLL enabled even when @pipe is not going to
-
 
6099
 * be enabled.
-
 
6100
 */
-
 
6101
void vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
-
 
6102
		      const struct dpll *dpll)
-
 
6103
{
-
 
6104
	struct intel_crtc *crtc =
-
 
6105
		to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
-
 
6106
	struct intel_crtc_config pipe_config = {
-
 
6107
		.pixel_multiplier = 1,
-
 
6108
		.dpll = *dpll,
-
 
6109
	};
-
 
6110
 
-
 
6111
	if (IS_CHERRYVIEW(dev)) {
-
 
6112
		chv_update_pll(crtc, &pipe_config);
-
 
6113
		chv_prepare_pll(crtc, &pipe_config);
-
 
6114
		chv_enable_pll(crtc, &pipe_config);
-
 
6115
	} else {
-
 
6116
		vlv_update_pll(crtc, &pipe_config);
-
 
6117
		vlv_prepare_pll(crtc, &pipe_config);
-
 
6118
		vlv_enable_pll(crtc, &pipe_config);
-
 
6119
	}
-
 
6120
}
-
 
6121
 
-
 
6122
/**
-
 
6123
 * vlv_force_pll_off - forcibly disable just the PLL
-
 
6124
 * @dev_priv: i915 private structure
-
 
6125
 * @pipe: pipe PLL to disable
-
 
6126
 *
-
 
6127
 * Disable the PLL for @pipe. To be used in cases where we need
-
 
6128
 * the PLL enabled even when @pipe is not going to be enabled.
-
 
6129
 */
-
 
6130
void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe)
-
 
6131
{
-
 
6132
	if (IS_CHERRYVIEW(dev))
-
 
6133
		chv_disable_pll(to_i915(dev), pipe);
-
 
6134
	else
-
 
6135
		vlv_disable_pll(to_i915(dev), pipe);
-
 
6136
}
5740
 
6137
 
5741
static void i9xx_update_pll(struct intel_crtc *crtc,
6138
static void i9xx_update_pll(struct intel_crtc *crtc,
5742
			    intel_clock_t *reduced_clock,
6139
			    intel_clock_t *reduced_clock,
5743
			    int num_connectors)
6140
			    int num_connectors)
5744
{
6141
{
5745
	struct drm_device *dev = crtc->base.dev;
6142
	struct drm_device *dev = crtc->base.dev;
5746
	struct drm_i915_private *dev_priv = dev->dev_private;
6143
	struct drm_i915_private *dev_priv = dev->dev_private;
5747
	u32 dpll;
6144
	u32 dpll;
5748
	bool is_sdvo;
6145
	bool is_sdvo;
5749
	struct dpll *clock = &crtc->config.dpll;
6146
	struct dpll *clock = &crtc->new_config->dpll;
5750
 
6147
 
5751
	i9xx_update_pll_dividers(crtc, reduced_clock);
6148
	i9xx_update_pll_dividers(crtc, reduced_clock);
5752
 
6149
 
5753
	is_sdvo = intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_SDVO) ||
6150
	is_sdvo = intel_pipe_will_have_type(crtc, INTEL_OUTPUT_SDVO) ||
5754
		intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI);
6151
		intel_pipe_will_have_type(crtc, INTEL_OUTPUT_HDMI);
5755
 
6152
 
5756
	dpll = DPLL_VGA_MODE_DIS;
6153
	dpll = DPLL_VGA_MODE_DIS;
5757
 
6154
 
5758
	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS))
6155
	if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
5759
		dpll |= DPLLB_MODE_LVDS;
6156
		dpll |= DPLLB_MODE_LVDS;
5760
	else
6157
	else
5761
		dpll |= DPLLB_MODE_DAC_SERIAL;
6158
		dpll |= DPLLB_MODE_DAC_SERIAL;
5762
 
6159
 
5763
	if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
6160
	if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
5764
			dpll |= (crtc->config.pixel_multiplier - 1)
6161
		dpll |= (crtc->new_config->pixel_multiplier - 1)
5765
				<< SDVO_MULTIPLIER_SHIFT_HIRES;
6162
				<< SDVO_MULTIPLIER_SHIFT_HIRES;
5766
		}
6163
		}
5767
 
6164
 
5768
	if (is_sdvo)
6165
	if (is_sdvo)
5769
		dpll |= DPLL_SDVO_HIGH_SPEED;
6166
		dpll |= DPLL_SDVO_HIGH_SPEED;
5770
 
6167
 
5771
	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT))
6168
	if (crtc->new_config->has_dp_encoder)
5772
		dpll |= DPLL_SDVO_HIGH_SPEED;
6169
		dpll |= DPLL_SDVO_HIGH_SPEED;
5773
 
6170
 
5774
	/* compute bitmask from p1 value */
6171
	/* compute bitmask from p1 value */
5775
	if (IS_PINEVIEW(dev))
6172
	if (IS_PINEVIEW(dev))
5776
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
6173
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
5777
	else {
6174
	else {
5778
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
6175
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5779
		if (IS_G4X(dev) && reduced_clock)
6176
		if (IS_G4X(dev) && reduced_clock)
5780
			dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
6177
			dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
5781
	}
6178
	}
5782
	switch (clock->p2) {
6179
	switch (clock->p2) {
5783
	case 5:
6180
	case 5:
5784
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
6181
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
5785
		break;
6182
		break;
5786
	case 7:
6183
	case 7:
5787
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
6184
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
5788
		break;
6185
		break;
5789
	case 10:
6186
	case 10:
5790
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
6187
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
5791
		break;
6188
		break;
5792
	case 14:
6189
	case 14:
5793
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
6190
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
5794
		break;
6191
		break;
5795
	}
6192
	}
5796
	if (INTEL_INFO(dev)->gen >= 4)
6193
	if (INTEL_INFO(dev)->gen >= 4)
5797
		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
6194
		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
5798
 
6195
 
5799
	if (crtc->config.sdvo_tv_clock)
6196
	if (crtc->new_config->sdvo_tv_clock)
5800
		dpll |= PLL_REF_INPUT_TVCLKINBC;
6197
		dpll |= PLL_REF_INPUT_TVCLKINBC;
5801
	else if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
6198
	else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
5802
		 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
6199
		 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
5803
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
6200
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
5804
	else
6201
	else
5805
		dpll |= PLL_REF_INPUT_DREFCLK;
6202
		dpll |= PLL_REF_INPUT_DREFCLK;
5806
 
6203
 
5807
	dpll |= DPLL_VCO_ENABLE;
6204
	dpll |= DPLL_VCO_ENABLE;
5808
	crtc->config.dpll_hw_state.dpll = dpll;
6205
	crtc->new_config->dpll_hw_state.dpll = dpll;
5809
 
6206
 
5810
	if (INTEL_INFO(dev)->gen >= 4) {
6207
	if (INTEL_INFO(dev)->gen >= 4) {
5811
		u32 dpll_md = (crtc->config.pixel_multiplier - 1)
6208
		u32 dpll_md = (crtc->new_config->pixel_multiplier - 1)
5812
					<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
6209
					<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
5813
		crtc->config.dpll_hw_state.dpll_md = dpll_md;
6210
		crtc->new_config->dpll_hw_state.dpll_md = dpll_md;
5814
	}
6211
	}
5815
}
6212
}
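
The "compute bitmask from p1 value" step in i9xx_update_pll() above encodes the P1 post divider as a one-hot field before placing it in the DPLL word. A minimal sketch of that encoding, assuming only the one-hot/shift convention visible above (encode_p1_field() is illustrative, not a driver function):

/* Hedged sketch: encode a P1 post divider the way the code above does.
 * p1 becomes a one-hot bit (1 << (p1 - 1)) shifted into its field;
 * e.g. p1 = 3 yields binary 100 in the P1 field. */
static u32 encode_p1_field(int p1, int field_shift)
{
	return (1u << (p1 - 1)) << field_shift;
}
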
5816
 
6213
 
5817
static void i8xx_update_pll(struct intel_crtc *crtc,
6214
static void i8xx_update_pll(struct intel_crtc *crtc,
5818
			    intel_clock_t *reduced_clock,
6215
			    intel_clock_t *reduced_clock,
5819
			    int num_connectors)
6216
			    int num_connectors)
5820
{
6217
{
5821
	struct drm_device *dev = crtc->base.dev;
6218
	struct drm_device *dev = crtc->base.dev;
5822
	struct drm_i915_private *dev_priv = dev->dev_private;
6219
	struct drm_i915_private *dev_priv = dev->dev_private;
5823
	u32 dpll;
6220
	u32 dpll;
5824
	struct dpll *clock = &crtc->config.dpll;
6221
	struct dpll *clock = &crtc->new_config->dpll;
5825
 
6222
 
5826
	i9xx_update_pll_dividers(crtc, reduced_clock);
6223
	i9xx_update_pll_dividers(crtc, reduced_clock);
5827
 
6224
 
5828
	dpll = DPLL_VGA_MODE_DIS;
6225
	dpll = DPLL_VGA_MODE_DIS;
5829
 
6226
 
5830
	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS)) {
6227
	if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
5831
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
6228
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5832
	} else {
6229
	} else {
5833
		if (clock->p1 == 2)
6230
		if (clock->p1 == 2)
5834
			dpll |= PLL_P1_DIVIDE_BY_TWO;
6231
			dpll |= PLL_P1_DIVIDE_BY_TWO;
5835
		else
6232
		else
5836
			dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
6233
			dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5837
		if (clock->p2 == 4)
6234
		if (clock->p2 == 4)
5838
			dpll |= PLL_P2_DIVIDE_BY_4;
6235
			dpll |= PLL_P2_DIVIDE_BY_4;
5839
	}
6236
	}
5840
 
6237
 
5841
	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO))
6238
	if (!IS_I830(dev) && intel_pipe_will_have_type(crtc, INTEL_OUTPUT_DVO))
5842
		dpll |= DPLL_DVO_2X_MODE;
6239
		dpll |= DPLL_DVO_2X_MODE;
5843
 
6240
 
5844
	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
6241
	if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
5845
		 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
6242
		 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
5846
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
6243
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
5847
	else
6244
	else
5848
		dpll |= PLL_REF_INPUT_DREFCLK;
6245
		dpll |= PLL_REF_INPUT_DREFCLK;
5849
 
6246
 
5850
	dpll |= DPLL_VCO_ENABLE;
6247
	dpll |= DPLL_VCO_ENABLE;
5851
	crtc->config.dpll_hw_state.dpll = dpll;
6248
	crtc->new_config->dpll_hw_state.dpll = dpll;
5852
}
6249
}
5853
 
6250
 
5854
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
6251
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
5855
{
6252
{
5856
	struct drm_device *dev = intel_crtc->base.dev;
6253
	struct drm_device *dev = intel_crtc->base.dev;
5857
	struct drm_i915_private *dev_priv = dev->dev_private;
6254
	struct drm_i915_private *dev_priv = dev->dev_private;
5858
	enum pipe pipe = intel_crtc->pipe;
6255
	enum pipe pipe = intel_crtc->pipe;
5859
	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
6256
	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
5860
	struct drm_display_mode *adjusted_mode =
6257
	struct drm_display_mode *adjusted_mode =
5861
		&intel_crtc->config.adjusted_mode;
6258
		&intel_crtc->config.adjusted_mode;
5862
	uint32_t crtc_vtotal, crtc_vblank_end;
6259
	uint32_t crtc_vtotal, crtc_vblank_end;
5863
	int vsyncshift = 0;
6260
	int vsyncshift = 0;
5864
 
6261
 
5865
	/* We need to be careful not to change the adjusted mode, for otherwise
6262
	/* We need to be careful not to change the adjusted mode, for otherwise
5866
	 * the hw state checker will get angry at the mismatch. */
6263
	 * the hw state checker will get angry at the mismatch. */
5867
	crtc_vtotal = adjusted_mode->crtc_vtotal;
6264
	crtc_vtotal = adjusted_mode->crtc_vtotal;
5868
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;
6265
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;
5869
 
6266
 
5870
	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
6267
	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
5871
		/* the chip adds 2 halflines automatically */
6268
		/* the chip adds 2 halflines automatically */
5872
		crtc_vtotal -= 1;
6269
		crtc_vtotal -= 1;
5873
		crtc_vblank_end -= 1;
6270
		crtc_vblank_end -= 1;
5874
 
6271
 
5875
		if (intel_pipe_has_type(&intel_crtc->base, INTEL_OUTPUT_SDVO))
6272
		if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
5876
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
6273
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
5877
		else
6274
		else
5878
			vsyncshift = adjusted_mode->crtc_hsync_start -
6275
			vsyncshift = adjusted_mode->crtc_hsync_start -
5879
				adjusted_mode->crtc_htotal / 2;
6276
				adjusted_mode->crtc_htotal / 2;
5880
		if (vsyncshift < 0)
6277
		if (vsyncshift < 0)
5881
			vsyncshift += adjusted_mode->crtc_htotal;
6278
			vsyncshift += adjusted_mode->crtc_htotal;
5882
	}
6279
	}
5883
 
6280
 
5884
	if (INTEL_INFO(dev)->gen > 3)
6281
	if (INTEL_INFO(dev)->gen > 3)
5885
		I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);
6282
		I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);
5886
 
6283
 
5887
	I915_WRITE(HTOTAL(cpu_transcoder),
6284
	I915_WRITE(HTOTAL(cpu_transcoder),
5888
		   (adjusted_mode->crtc_hdisplay - 1) |
6285
		   (adjusted_mode->crtc_hdisplay - 1) |
5889
		   ((adjusted_mode->crtc_htotal - 1) << 16));
6286
		   ((adjusted_mode->crtc_htotal - 1) << 16));
5890
	I915_WRITE(HBLANK(cpu_transcoder),
6287
	I915_WRITE(HBLANK(cpu_transcoder),
5891
		   (adjusted_mode->crtc_hblank_start - 1) |
6288
		   (adjusted_mode->crtc_hblank_start - 1) |
5892
		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
6289
		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
5893
	I915_WRITE(HSYNC(cpu_transcoder),
6290
	I915_WRITE(HSYNC(cpu_transcoder),
5894
		   (adjusted_mode->crtc_hsync_start - 1) |
6291
		   (adjusted_mode->crtc_hsync_start - 1) |
5895
		   ((adjusted_mode->crtc_hsync_end - 1) << 16));
6292
		   ((adjusted_mode->crtc_hsync_end - 1) << 16));
5896
 
6293
 
5897
	I915_WRITE(VTOTAL(cpu_transcoder),
6294
	I915_WRITE(VTOTAL(cpu_transcoder),
5898
		   (adjusted_mode->crtc_vdisplay - 1) |
6295
		   (adjusted_mode->crtc_vdisplay - 1) |
5899
		   ((crtc_vtotal - 1) << 16));
6296
		   ((crtc_vtotal - 1) << 16));
5900
	I915_WRITE(VBLANK(cpu_transcoder),
6297
	I915_WRITE(VBLANK(cpu_transcoder),
5901
		   (adjusted_mode->crtc_vblank_start - 1) |
6298
		   (adjusted_mode->crtc_vblank_start - 1) |
5902
		   ((crtc_vblank_end - 1) << 16));
6299
		   ((crtc_vblank_end - 1) << 16));
5903
	I915_WRITE(VSYNC(cpu_transcoder),
6300
	I915_WRITE(VSYNC(cpu_transcoder),
5904
		   (adjusted_mode->crtc_vsync_start - 1) |
6301
		   (adjusted_mode->crtc_vsync_start - 1) |
5905
		   ((adjusted_mode->crtc_vsync_end - 1) << 16));
6302
		   ((adjusted_mode->crtc_vsync_end - 1) << 16));
5906
 
6303
 
5907
	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
6304
	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
5908
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
6305
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
5909
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
6306
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
5910
	 * bits. */
6307
	 * bits. */
5911
	if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP &&
6308
	if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP &&
5912
	    (pipe == PIPE_B || pipe == PIPE_C))
6309
	    (pipe == PIPE_B || pipe == PIPE_C))
5913
		I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
6310
		I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
5914
 
6311
 
5915
	/* pipesrc controls the size that is scaled from, which should
6312
	/* pipesrc controls the size that is scaled from, which should
5916
	 * always be the user's requested size.
6313
	 * always be the user's requested size.
5917
	 */
6314
	 */
5918
	I915_WRITE(PIPESRC(pipe),
6315
	I915_WRITE(PIPESRC(pipe),
5919
		   ((intel_crtc->config.pipe_src_w - 1) << 16) |
6316
		   ((intel_crtc->config.pipe_src_w - 1) << 16) |
5920
		   (intel_crtc->config.pipe_src_h - 1));
6317
		   (intel_crtc->config.pipe_src_h - 1));
5921
}
6318
}
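
intel_set_pipe_timings() above packs each timing pair into a single register: the low 16 bits hold (first value - 1) and the high 16 bits hold (second value - 1). A minimal sketch of that packing with hypothetical mode values (pack_timings() is illustrative, not a driver helper):

/* Hedged sketch of the HTOTAL/VTOTAL-style packing used above. */
static u32 pack_timings(u32 active, u32 total)
{
	return (active - 1) | ((total - 1) << 16);
}

/* Example: hdisplay 1920 with htotal 2200 would program HTOTAL with
 * pack_timings(1920, 2200) == 0x0897077f. */
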
5922
 
6319
 
5923
static void intel_get_pipe_timings(struct intel_crtc *crtc,
6320
static void intel_get_pipe_timings(struct intel_crtc *crtc,
5924
				   struct intel_crtc_config *pipe_config)
6321
				   struct intel_crtc_config *pipe_config)
5925
{
6322
{
5926
	struct drm_device *dev = crtc->base.dev;
6323
	struct drm_device *dev = crtc->base.dev;
5927
	struct drm_i915_private *dev_priv = dev->dev_private;
6324
	struct drm_i915_private *dev_priv = dev->dev_private;
5928
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
6325
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
5929
	uint32_t tmp;
6326
	uint32_t tmp;
5930
 
6327
 
5931
	tmp = I915_READ(HTOTAL(cpu_transcoder));
6328
	tmp = I915_READ(HTOTAL(cpu_transcoder));
5932
	pipe_config->adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
6329
	pipe_config->adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
5933
	pipe_config->adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
6330
	pipe_config->adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
5934
	tmp = I915_READ(HBLANK(cpu_transcoder));
6331
	tmp = I915_READ(HBLANK(cpu_transcoder));
5935
	pipe_config->adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
6332
	pipe_config->adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
5936
	pipe_config->adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
6333
	pipe_config->adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
5937
	tmp = I915_READ(HSYNC(cpu_transcoder));
6334
	tmp = I915_READ(HSYNC(cpu_transcoder));
5938
	pipe_config->adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
6335
	pipe_config->adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
5939
	pipe_config->adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
6336
	pipe_config->adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
5940
 
6337
 
5941
	tmp = I915_READ(VTOTAL(cpu_transcoder));
6338
	tmp = I915_READ(VTOTAL(cpu_transcoder));
5942
	pipe_config->adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
6339
	pipe_config->adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
5943
	pipe_config->adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
6340
	pipe_config->adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
5944
	tmp = I915_READ(VBLANK(cpu_transcoder));
6341
	tmp = I915_READ(VBLANK(cpu_transcoder));
5945
	pipe_config->adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
6342
	pipe_config->adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
5946
	pipe_config->adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
6343
	pipe_config->adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
5947
	tmp = I915_READ(VSYNC(cpu_transcoder));
6344
	tmp = I915_READ(VSYNC(cpu_transcoder));
5948
	pipe_config->adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
6345
	pipe_config->adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
5949
	pipe_config->adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
6346
	pipe_config->adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
5950
 
6347
 
5951
	if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
6348
	if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
5952
		pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
6349
		pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
5953
		pipe_config->adjusted_mode.crtc_vtotal += 1;
6350
		pipe_config->adjusted_mode.crtc_vtotal += 1;
5954
		pipe_config->adjusted_mode.crtc_vblank_end += 1;
6351
		pipe_config->adjusted_mode.crtc_vblank_end += 1;
5955
	}
6352
	}
5956
 
6353
 
5957
	tmp = I915_READ(PIPESRC(crtc->pipe));
6354
	tmp = I915_READ(PIPESRC(crtc->pipe));
5958
	pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
6355
	pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
5959
	pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
6356
	pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
5960
 
6357
 
5961
	pipe_config->requested_mode.vdisplay = pipe_config->pipe_src_h;
6358
	pipe_config->requested_mode.vdisplay = pipe_config->pipe_src_h;
5962
	pipe_config->requested_mode.hdisplay = pipe_config->pipe_src_w;
6359
	pipe_config->requested_mode.hdisplay = pipe_config->pipe_src_w;
5963
}
6360
}
5964
 
6361
 
5965
void intel_mode_from_pipe_config(struct drm_display_mode *mode,
6362
void intel_mode_from_pipe_config(struct drm_display_mode *mode,
5966
					     struct intel_crtc_config *pipe_config)
6363
					     struct intel_crtc_config *pipe_config)
5967
{
6364
{
5968
	mode->hdisplay = pipe_config->adjusted_mode.crtc_hdisplay;
6365
	mode->hdisplay = pipe_config->adjusted_mode.crtc_hdisplay;
5969
	mode->htotal = pipe_config->adjusted_mode.crtc_htotal;
6366
	mode->htotal = pipe_config->adjusted_mode.crtc_htotal;
5970
	mode->hsync_start = pipe_config->adjusted_mode.crtc_hsync_start;
6367
	mode->hsync_start = pipe_config->adjusted_mode.crtc_hsync_start;
5971
	mode->hsync_end = pipe_config->adjusted_mode.crtc_hsync_end;
6368
	mode->hsync_end = pipe_config->adjusted_mode.crtc_hsync_end;
5972
 
6369
 
5973
	mode->vdisplay = pipe_config->adjusted_mode.crtc_vdisplay;
6370
	mode->vdisplay = pipe_config->adjusted_mode.crtc_vdisplay;
5974
	mode->vtotal = pipe_config->adjusted_mode.crtc_vtotal;
6371
	mode->vtotal = pipe_config->adjusted_mode.crtc_vtotal;
5975
	mode->vsync_start = pipe_config->adjusted_mode.crtc_vsync_start;
6372
	mode->vsync_start = pipe_config->adjusted_mode.crtc_vsync_start;
5976
	mode->vsync_end = pipe_config->adjusted_mode.crtc_vsync_end;
6373
	mode->vsync_end = pipe_config->adjusted_mode.crtc_vsync_end;
5977
 
6374
 
5978
	mode->flags = pipe_config->adjusted_mode.flags;
6375
	mode->flags = pipe_config->adjusted_mode.flags;
5979
 
6376
 
5980
	mode->clock = pipe_config->adjusted_mode.crtc_clock;
6377
	mode->clock = pipe_config->adjusted_mode.crtc_clock;
5981
	mode->flags |= pipe_config->adjusted_mode.flags;
6378
	mode->flags |= pipe_config->adjusted_mode.flags;
5982
}
6379
}
5983
 
6380
 
5984
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
6381
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
5985
{
6382
{
5986
	struct drm_device *dev = intel_crtc->base.dev;
6383
	struct drm_device *dev = intel_crtc->base.dev;
5987
	struct drm_i915_private *dev_priv = dev->dev_private;
6384
	struct drm_i915_private *dev_priv = dev->dev_private;
5988
	uint32_t pipeconf;
6385
	uint32_t pipeconf;
5989
 
6386
 
5990
	pipeconf = 0;
6387
	pipeconf = 0;
5991
 
6388
 
5992
	if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
6389
	if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
5993
	    I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE)
6390
	    (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
5994
		pipeconf |= PIPECONF_ENABLE;
6391
		pipeconf |= I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE;
5995
 
6392
 
5996
	if (intel_crtc->config.double_wide)
6393
	if (intel_crtc->config.double_wide)
5997
			pipeconf |= PIPECONF_DOUBLE_WIDE;
6394
			pipeconf |= PIPECONF_DOUBLE_WIDE;
5998
 
6395
 
5999
	/* only g4x and later have fancy bpc/dither controls */
6396
	/* only g4x and later have fancy bpc/dither controls */
6000
	if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
6397
	if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
6001
		/* Bspec claims that we can't use dithering for 30bpp pipes. */
6398
		/* Bspec claims that we can't use dithering for 30bpp pipes. */
6002
		if (intel_crtc->config.dither && intel_crtc->config.pipe_bpp != 30)
6399
		if (intel_crtc->config.dither && intel_crtc->config.pipe_bpp != 30)
6003
			pipeconf |= PIPECONF_DITHER_EN |
6400
			pipeconf |= PIPECONF_DITHER_EN |
6004
				    PIPECONF_DITHER_TYPE_SP;
6401
				    PIPECONF_DITHER_TYPE_SP;
6005
 
6402
 
6006
		switch (intel_crtc->config.pipe_bpp) {
6403
		switch (intel_crtc->config.pipe_bpp) {
6007
		case 18:
6404
		case 18:
6008
			pipeconf |= PIPECONF_6BPC;
6405
			pipeconf |= PIPECONF_6BPC;
6009
			break;
6406
			break;
6010
		case 24:
6407
		case 24:
6011
			pipeconf |= PIPECONF_8BPC;
6408
			pipeconf |= PIPECONF_8BPC;
6012
			break;
6409
			break;
6013
		case 30:
6410
		case 30:
6014
			pipeconf |= PIPECONF_10BPC;
6411
			pipeconf |= PIPECONF_10BPC;
6015
			break;
6412
			break;
6016
		default:
6413
		default:
6017
			/* Case prevented by intel_choose_pipe_bpp_dither. */
6414
			/* Case prevented by intel_choose_pipe_bpp_dither. */
6018
			BUG();
6415
			BUG();
6019
		}
6416
		}
6020
	}
6417
	}
6021
 
6418
 
6022
	if (HAS_PIPE_CXSR(dev)) {
6419
	if (HAS_PIPE_CXSR(dev)) {
6023
		if (intel_crtc->lowfreq_avail) {
6420
		if (intel_crtc->lowfreq_avail) {
6024
			DRM_DEBUG_KMS("enabling CxSR downclocking\n");
6421
			DRM_DEBUG_KMS("enabling CxSR downclocking\n");
6025
			pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
6422
			pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
6026
		} else {
6423
		} else {
6027
			DRM_DEBUG_KMS("disabling CxSR downclocking\n");
6424
			DRM_DEBUG_KMS("disabling CxSR downclocking\n");
6028
		}
6425
		}
6029
	}
6426
	}
6030
 
6427
 
6031
	if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
6428
	if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
6032
		if (INTEL_INFO(dev)->gen < 4 ||
6429
		if (INTEL_INFO(dev)->gen < 4 ||
6033
		    intel_pipe_has_type(&intel_crtc->base, INTEL_OUTPUT_SDVO))
6430
		    intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
6034
		pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
6431
		pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
6035
	else
6432
	else
6036
			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
6433
			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
6037
	} else
6434
	} else
6038
		pipeconf |= PIPECONF_PROGRESSIVE;
6435
		pipeconf |= PIPECONF_PROGRESSIVE;
6039
 
6436
 
6040
	if (IS_VALLEYVIEW(dev) && intel_crtc->config.limited_color_range)
6437
	if (IS_VALLEYVIEW(dev) && intel_crtc->config.limited_color_range)
6041
			pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
6438
			pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
6042
 
6439
 
6043
	I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
6440
	I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
6044
	POSTING_READ(PIPECONF(intel_crtc->pipe));
6441
	POSTING_READ(PIPECONF(intel_crtc->pipe));
6045
}
6442
}
6046
 
6443
 
6047
static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
-
 
6048
			      int x, int y,
-
 
6049
			      struct drm_framebuffer *fb)
6444
static int i9xx_crtc_compute_clock(struct intel_crtc *crtc)
6050
{
6445
{
6051
	struct drm_device *dev = crtc->dev;
6446
	struct drm_device *dev = crtc->base.dev;
6052
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
6053
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6447
	struct drm_i915_private *dev_priv = dev->dev_private;
6054
	int refclk, num_connectors = 0;
6448
	int refclk, num_connectors = 0;
6055
	intel_clock_t clock, reduced_clock;
6449
	intel_clock_t clock, reduced_clock;
6056
	bool ok, has_reduced_clock = false;
6450
	bool ok, has_reduced_clock = false;
6057
	bool is_lvds = false, is_dsi = false;
6451
	bool is_lvds = false, is_dsi = false;
6058
	struct intel_encoder *encoder;
6452
	struct intel_encoder *encoder;
6059
	const intel_limit_t *limit;
6453
	const intel_limit_t *limit;
6060
 
6454
 
-
 
6455
	for_each_intel_encoder(dev, encoder) {
-
 
6456
		if (encoder->new_crtc != crtc)
-
 
6457
			continue;
6061
	for_each_encoder_on_crtc(dev, crtc, encoder) {
6458
 
6062
		switch (encoder->type) {
6459
		switch (encoder->type) {
6063
		case INTEL_OUTPUT_LVDS:
6460
		case INTEL_OUTPUT_LVDS:
6064
			is_lvds = true;
6461
			is_lvds = true;
6065
			break;
6462
			break;
6066
		case INTEL_OUTPUT_DSI:
6463
		case INTEL_OUTPUT_DSI:
6067
			is_dsi = true;
6464
			is_dsi = true;
6068
			break;
6465
			break;
-
 
6466
		default:
-
 
6467
			break;
6069
		}
6468
		}
6070
 
6469
 
6071
		num_connectors++;
6470
		num_connectors++;
6072
	}
6471
	}
6073
 
6472
 
6074
	if (is_dsi)
6473
	if (is_dsi)
6075
		return 0;
6474
		return 0;
6076
 
6475
 
6077
	if (!intel_crtc->config.clock_set) {
6476
	if (!crtc->new_config->clock_set) {
6078
	refclk = i9xx_get_refclk(crtc, num_connectors);
6477
	refclk = i9xx_get_refclk(crtc, num_connectors);
6079
 
6478
 
6080
	/*
6479
	/*
6081
		 * Returns a set of divisors for the desired target clock with
6480
		 * Returns a set of divisors for the desired target clock with
6082
		 * the given refclk, or FALSE.  The returned values represent
6481
		 * the given refclk, or FALSE.  The returned values represent
6083
		 * the clock equation: refclk * (5 * (m1 + 2) + (m2 + 2)) / (n +
6482
		 * the clock equation: refclk * (5 * (m1 + 2) + (m2 + 2)) / (n +
6084
		 * 2) / p1 / p2.
6483
		 * 2) / p1 / p2.
6085
	 */
6484
	 */
6086
	limit = intel_limit(crtc, refclk);
6485
	limit = intel_limit(crtc, refclk);
6087
	ok = dev_priv->display.find_dpll(limit, crtc,
6486
	ok = dev_priv->display.find_dpll(limit, crtc,
6088
					 intel_crtc->config.port_clock,
6487
						 crtc->new_config->port_clock,
6089
					 refclk, NULL, &clock);
6488
					 refclk, NULL, &clock);
6090
		if (!ok) {
6489
		if (!ok) {
6091
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
6490
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
6092
		return -EINVAL;
6491
		return -EINVAL;
6093
	}
6492
	}
6094
 
6493
 
6095
	if (is_lvds && dev_priv->lvds_downclock_avail) {
6494
	if (is_lvds && dev_priv->lvds_downclock_avail) {
6096
		/*
6495
		/*
6097
			 * Ensure we match the reduced clock's P to the target
6496
			 * Ensure we match the reduced clock's P to the target
6098
			 * clock.  If the clocks don't match, we can't switch
6497
			 * clock.  If the clocks don't match, we can't switch
6099
			 * the display clock by using the FP0/FP1. In such case
6498
			 * the display clock by using the FP0/FP1. In such case
6100
			 * we will disable the LVDS downclock feature.
6499
			 * we will disable the LVDS downclock feature.
6101
		*/
6500
		*/
6102
		has_reduced_clock =
6501
		has_reduced_clock =
6103
			dev_priv->display.find_dpll(limit, crtc,
6502
			dev_priv->display.find_dpll(limit, crtc,
6104
						    dev_priv->lvds_downclock,
6503
						    dev_priv->lvds_downclock,
6105
						    refclk, &clock,
6504
						    refclk, &clock,
6106
						    &reduced_clock);
6505
						    &reduced_clock);
6107
	}
6506
	}
6108
	/* Compat-code for transition, will disappear. */
6507
	/* Compat-code for transition, will disappear. */
6109
		intel_crtc->config.dpll.n = clock.n;
6508
		crtc->new_config->dpll.n = clock.n;
6110
		intel_crtc->config.dpll.m1 = clock.m1;
6509
		crtc->new_config->dpll.m1 = clock.m1;
6111
		intel_crtc->config.dpll.m2 = clock.m2;
6510
		crtc->new_config->dpll.m2 = clock.m2;
6112
		intel_crtc->config.dpll.p1 = clock.p1;
6511
		crtc->new_config->dpll.p1 = clock.p1;
6113
		intel_crtc->config.dpll.p2 = clock.p2;
6512
		crtc->new_config->dpll.p2 = clock.p2;
6114
	}
6513
	}
6115
 
6514
 
6116
	if (IS_GEN2(dev)) {
6515
	if (IS_GEN2(dev)) {
6117
		i8xx_update_pll(intel_crtc,
6516
		i8xx_update_pll(crtc,
6118
				has_reduced_clock ? &reduced_clock : NULL,
6517
				has_reduced_clock ? &reduced_clock : NULL,
6119
				num_connectors);
6518
				num_connectors);
6120
	} else if (IS_CHERRYVIEW(dev)) {
6519
	} else if (IS_CHERRYVIEW(dev)) {
6121
		chv_update_pll(intel_crtc);
6520
		chv_update_pll(crtc, crtc->new_config);
6122
	} else if (IS_VALLEYVIEW(dev)) {
6521
	} else if (IS_VALLEYVIEW(dev)) {
6123
		vlv_update_pll(intel_crtc);
6522
		vlv_update_pll(crtc, crtc->new_config);
6124
	} else {
6523
	} else {
6125
		i9xx_update_pll(intel_crtc,
6524
		i9xx_update_pll(crtc,
6126
				has_reduced_clock ? &reduced_clock : NULL,
6525
				has_reduced_clock ? &reduced_clock : NULL,
6127
				num_connectors);
6526
				num_connectors);
6128
	}
6527
	}
6129
 
6528
 
6130
	return 0;
6529
	return 0;
6131
}
6530
}
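
The divisor comment in i9xx_crtc_compute_clock() above states the clock equation refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. A standalone sketch of that arithmetic, assuming the struct dpll field names used throughout this file (the function itself is illustrative; the driver has its own clock helpers):

/* Hedged sketch of the clock equation quoted in the comment above.
 * Returns the dot clock in the same units as refclk (typically kHz). */
static int i9xx_dot_clock_sketch(int refclk, const struct dpll *clock)
{
	int m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
	int vco = refclk * m / (clock->n + 2);

	return vco / clock->p1 / clock->p2;
}
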
6132
 
6531
 
6133
static void i9xx_get_pfit_config(struct intel_crtc *crtc,
6532
static void i9xx_get_pfit_config(struct intel_crtc *crtc,
6134
				 struct intel_crtc_config *pipe_config)
6533
				 struct intel_crtc_config *pipe_config)
6135
{
6534
{
6136
	struct drm_device *dev = crtc->base.dev;
6535
	struct drm_device *dev = crtc->base.dev;
6137
	struct drm_i915_private *dev_priv = dev->dev_private;
6536
	struct drm_i915_private *dev_priv = dev->dev_private;
6138
	uint32_t tmp;
6537
	uint32_t tmp;
6139
 
6538
 
6140
	if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
6539
	if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
6141
		return;
6540
		return;
6142
 
6541
 
6143
	tmp = I915_READ(PFIT_CONTROL);
6542
	tmp = I915_READ(PFIT_CONTROL);
6144
	if (!(tmp & PFIT_ENABLE))
6543
	if (!(tmp & PFIT_ENABLE))
6145
		return;
6544
		return;
6146
 
6545
 
6147
	/* Check whether the pfit is attached to our pipe. */
6546
	/* Check whether the pfit is attached to our pipe. */
6148
	if (INTEL_INFO(dev)->gen < 4) {
6547
	if (INTEL_INFO(dev)->gen < 4) {
6149
		if (crtc->pipe != PIPE_B)
6548
		if (crtc->pipe != PIPE_B)
6150
			return;
6549
			return;
6151
	} else {
6550
	} else {
6152
		if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
6551
		if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
6153
			return;
6552
			return;
6154
	}
6553
	}
6155
 
6554
 
6156
	pipe_config->gmch_pfit.control = tmp;
6555
	pipe_config->gmch_pfit.control = tmp;
6157
	pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
6556
	pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
6158
	if (INTEL_INFO(dev)->gen < 5)
6557
	if (INTEL_INFO(dev)->gen < 5)
6159
		pipe_config->gmch_pfit.lvds_border_bits =
6558
		pipe_config->gmch_pfit.lvds_border_bits =
6160
			I915_READ(LVDS) & LVDS_BORDER_ENABLE;
6559
			I915_READ(LVDS) & LVDS_BORDER_ENABLE;
6161
}
6560
}
6162
 
6561
 
6163
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
6562
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
6164
			       struct intel_crtc_config *pipe_config)
6563
			       struct intel_crtc_config *pipe_config)
6165
{
6564
{
6166
	struct drm_device *dev = crtc->base.dev;
6565
	struct drm_device *dev = crtc->base.dev;
6167
	struct drm_i915_private *dev_priv = dev->dev_private;
6566
	struct drm_i915_private *dev_priv = dev->dev_private;
6168
	int pipe = pipe_config->cpu_transcoder;
6567
	int pipe = pipe_config->cpu_transcoder;
6169
	intel_clock_t clock;
6568
	intel_clock_t clock;
6170
	u32 mdiv;
6569
	u32 mdiv;
6171
	int refclk = 100000;
6570
	int refclk = 100000;
6172
 
6571
 
6173
	/* In case of MIPI, the DPLL will not even be used */
6572
	/* In case of MIPI, the DPLL will not even be used */
6174
	if (!(pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE))
6573
	if (!(pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE))
6175
		return;
6574
		return;
6176
 
6575
 
6177
	mutex_lock(&dev_priv->dpio_lock);
6576
	mutex_lock(&dev_priv->dpio_lock);
6178
	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
6577
	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
6179
	mutex_unlock(&dev_priv->dpio_lock);
6578
	mutex_unlock(&dev_priv->dpio_lock);
6180
 
6579
 
6181
	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
6580
	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
6182
	clock.m2 = mdiv & DPIO_M2DIV_MASK;
6581
	clock.m2 = mdiv & DPIO_M2DIV_MASK;
6183
	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
6582
	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
6184
	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
6583
	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
6185
	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
6584
	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
6186
 
6585
 
6187
	vlv_clock(refclk, &clock);
6586
	vlv_clock(refclk, &clock);
6188
 
6587
 
6189
	/* clock.dot is the fast clock */
6588
	/* clock.dot is the fast clock */
6190
	pipe_config->port_clock = clock.dot / 5;
6589
	pipe_config->port_clock = clock.dot / 5;
6191
}
6590
}
6192
 
6591
 
6193
static void i9xx_get_plane_config(struct intel_crtc *crtc,
6592
static void i9xx_get_plane_config(struct intel_crtc *crtc,
6194
				  struct intel_plane_config *plane_config)
6593
				  struct intel_plane_config *plane_config)
6195
{
6594
{
6196
	struct drm_device *dev = crtc->base.dev;
6595
	struct drm_device *dev = crtc->base.dev;
6197
	struct drm_i915_private *dev_priv = dev->dev_private;
6596
	struct drm_i915_private *dev_priv = dev->dev_private;
6198
	u32 val, base, offset;
6597
	u32 val, base, offset;
6199
	int pipe = crtc->pipe, plane = crtc->plane;
6598
	int pipe = crtc->pipe, plane = crtc->plane;
6200
	int fourcc, pixel_format;
6599
	int fourcc, pixel_format;
6201
	int aligned_height;
6600
	int aligned_height;
6202
 
6601
 
6203
	crtc->base.primary->fb = kzalloc(sizeof(struct intel_framebuffer), GFP_KERNEL);
6602
	crtc->base.primary->fb = kzalloc(sizeof(struct intel_framebuffer), GFP_KERNEL);
6204
	if (!crtc->base.primary->fb) {
6603
	if (!crtc->base.primary->fb) {
6205
		DRM_DEBUG_KMS("failed to alloc fb\n");
6604
		DRM_DEBUG_KMS("failed to alloc fb\n");
6206
		return;
6605
		return;
6207
	}
6606
	}
6208
 
6607
 
6209
	val = I915_READ(DSPCNTR(plane));
6608
	val = I915_READ(DSPCNTR(plane));
6210
 
6609
 
6211
	if (INTEL_INFO(dev)->gen >= 4)
6610
	if (INTEL_INFO(dev)->gen >= 4)
6212
		if (val & DISPPLANE_TILED)
6611
		if (val & DISPPLANE_TILED)
6213
			plane_config->tiled = true;
6612
			plane_config->tiled = true;
6214
 
6613
 
6215
	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
6614
	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
6216
	fourcc = intel_format_to_fourcc(pixel_format);
6615
	fourcc = intel_format_to_fourcc(pixel_format);
6217
	crtc->base.primary->fb->pixel_format = fourcc;
6616
	crtc->base.primary->fb->pixel_format = fourcc;
6218
	crtc->base.primary->fb->bits_per_pixel =
6617
	crtc->base.primary->fb->bits_per_pixel =
6219
		drm_format_plane_cpp(fourcc, 0) * 8;
6618
		drm_format_plane_cpp(fourcc, 0) * 8;
6220
 
6619
 
6221
	if (INTEL_INFO(dev)->gen >= 4) {
6620
	if (INTEL_INFO(dev)->gen >= 4) {
6222
		if (plane_config->tiled)
6621
		if (plane_config->tiled)
6223
			offset = I915_READ(DSPTILEOFF(plane));
6622
			offset = I915_READ(DSPTILEOFF(plane));
6224
		else
6623
		else
6225
			offset = I915_READ(DSPLINOFF(plane));
6624
			offset = I915_READ(DSPLINOFF(plane));
6226
		base = I915_READ(DSPSURF(plane)) & 0xfffff000;
6625
		base = I915_READ(DSPSURF(plane)) & 0xfffff000;
6227
	} else {
6626
	} else {
6228
		base = I915_READ(DSPADDR(plane));
6627
		base = I915_READ(DSPADDR(plane));
6229
	}
6628
	}
6230
	plane_config->base = base;
6629
	plane_config->base = base;
6231
 
6630
 
6232
	val = I915_READ(PIPESRC(pipe));
6631
	val = I915_READ(PIPESRC(pipe));
6233
	crtc->base.primary->fb->width = ((val >> 16) & 0xfff) + 1;
6632
	crtc->base.primary->fb->width = ((val >> 16) & 0xfff) + 1;
6234
	crtc->base.primary->fb->height = ((val >> 0) & 0xfff) + 1;
6633
	crtc->base.primary->fb->height = ((val >> 0) & 0xfff) + 1;
6235
 
6634
 
6236
	val = I915_READ(DSPSTRIDE(pipe));
6635
	val = I915_READ(DSPSTRIDE(pipe));
6237
	crtc->base.primary->fb->pitches[0] = val & 0xffffff80;
6636
	crtc->base.primary->fb->pitches[0] = val & 0xffffffc0;
6238
 
6637
 
6239
	aligned_height = intel_align_height(dev, crtc->base.primary->fb->height,
6638
	aligned_height = intel_align_height(dev, crtc->base.primary->fb->height,
6240
					    plane_config->tiled);
6639
					    plane_config->tiled);
6241
 
6640
 
6242
	plane_config->size = 16*1024*1024;
6641
	plane_config->size = 16*1024*1024;
6243
 
6642
 
6244
 
6643
 
6245
	DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
6644
	DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
6246
		      pipe, plane, crtc->base.primary->fb->width,
6645
		      pipe, plane, crtc->base.primary->fb->width,
6247
		      crtc->base.primary->fb->height,
6646
		      crtc->base.primary->fb->height,
6248
		      crtc->base.primary->fb->bits_per_pixel, base,
6647
		      crtc->base.primary->fb->bits_per_pixel, base,
6249
		      crtc->base.primary->fb->pitches[0],
6648
		      crtc->base.primary->fb->pitches[0],
6250
		      plane_config->size);
6649
		      plane_config->size);
6251
 
6650
 
6252
}
6651
}
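
i9xx_get_plane_config() above recovers the framebuffer geometry by unpacking PIPESRC: (width - 1) sits in bits 27:16 and (height - 1) in bits 11:0. A small sketch of that decode (decode_pipesrc() is illustrative, not a driver function):

/* Hedged sketch of the PIPESRC decode used above. */
static void decode_pipesrc(u32 val, int *width, int *height)
{
	*width  = ((val >> 16) & 0xfff) + 1;
	*height = ((val >>  0) & 0xfff) + 1;
}
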
6253
 
6652
 
6254
static void chv_crtc_clock_get(struct intel_crtc *crtc,
6653
static void chv_crtc_clock_get(struct intel_crtc *crtc,
6255
			       struct intel_crtc_config *pipe_config)
6654
			       struct intel_crtc_config *pipe_config)
6256
{
6655
{
6257
	struct drm_device *dev = crtc->base.dev;
6656
	struct drm_device *dev = crtc->base.dev;
6258
	struct drm_i915_private *dev_priv = dev->dev_private;
6657
	struct drm_i915_private *dev_priv = dev->dev_private;
6259
	int pipe = pipe_config->cpu_transcoder;
6658
	int pipe = pipe_config->cpu_transcoder;
6260
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
6659
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
6261
	intel_clock_t clock;
6660
	intel_clock_t clock;
6262
	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2;
6661
	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2;
6263
	int refclk = 100000;
6662
	int refclk = 100000;
6264
 
6663
 
6265
	mutex_lock(&dev_priv->dpio_lock);
6664
	mutex_lock(&dev_priv->dpio_lock);
6266
	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
6665
	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
6267
	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
6666
	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
6268
	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
6667
	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
6269
	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
6668
	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
6270
	mutex_unlock(&dev_priv->dpio_lock);
6669
	mutex_unlock(&dev_priv->dpio_lock);
6271
 
6670
 
6272
	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
6671
	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
6273
	clock.m2 = ((pll_dw0 & 0xff) << 22) | (pll_dw2 & 0x3fffff);
6672
	clock.m2 = ((pll_dw0 & 0xff) << 22) | (pll_dw2 & 0x3fffff);
6274
	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
6673
	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
6275
	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
6674
	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
6276
	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
6675
	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
6277
 
6676
 
6278
	chv_clock(refclk, &clock);
6677
	chv_clock(refclk, &clock);
6279
 
6678
 
6280
	/* clock.dot is the fast clock */
6679
	/* clock.dot is the fast clock */
6281
	pipe_config->port_clock = clock.dot / 5;
6680
	pipe_config->port_clock = clock.dot / 5;
6282
}
6681
}
6283
 
6682
 
6284
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
6683
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
6285
				 struct intel_crtc_config *pipe_config)
6684
				 struct intel_crtc_config *pipe_config)
6286
{
6685
{
6287
	struct drm_device *dev = crtc->base.dev;
6686
	struct drm_device *dev = crtc->base.dev;
6288
	struct drm_i915_private *dev_priv = dev->dev_private;
6687
	struct drm_i915_private *dev_priv = dev->dev_private;
6289
	uint32_t tmp;
6688
	uint32_t tmp;
6290
 
6689
 
6291
	if (!intel_display_power_enabled(dev_priv,
6690
	if (!intel_display_power_is_enabled(dev_priv,
6292
					 POWER_DOMAIN_PIPE(crtc->pipe)))
6691
					 POWER_DOMAIN_PIPE(crtc->pipe)))
6293
		return false;
6692
		return false;
6294
 
6693
 
6295
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
6694
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
6296
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;
6695
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;
6297
 
6696
 
6298
	tmp = I915_READ(PIPECONF(crtc->pipe));
6697
	tmp = I915_READ(PIPECONF(crtc->pipe));
6299
	if (!(tmp & PIPECONF_ENABLE))
6698
	if (!(tmp & PIPECONF_ENABLE))
6300
		return false;
6699
		return false;
6301
 
6700
 
6302
	if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
6701
	if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
6303
		switch (tmp & PIPECONF_BPC_MASK) {
6702
		switch (tmp & PIPECONF_BPC_MASK) {
6304
		case PIPECONF_6BPC:
6703
		case PIPECONF_6BPC:
6305
			pipe_config->pipe_bpp = 18;
6704
			pipe_config->pipe_bpp = 18;
6306
			break;
6705
			break;
6307
		case PIPECONF_8BPC:
6706
		case PIPECONF_8BPC:
6308
			pipe_config->pipe_bpp = 24;
6707
			pipe_config->pipe_bpp = 24;
6309
			break;
6708
			break;
6310
		case PIPECONF_10BPC:
6709
		case PIPECONF_10BPC:
6311
			pipe_config->pipe_bpp = 30;
6710
			pipe_config->pipe_bpp = 30;
6312
			break;
6711
			break;
6313
		default:
6712
		default:
6314
			break;
6713
			break;
6315
		}
6714
		}
6316
	}
6715
	}
6317
 
6716
 
6318
	if (IS_VALLEYVIEW(dev) && (tmp & PIPECONF_COLOR_RANGE_SELECT))
6717
	if (IS_VALLEYVIEW(dev) && (tmp & PIPECONF_COLOR_RANGE_SELECT))
6319
		pipe_config->limited_color_range = true;
6718
		pipe_config->limited_color_range = true;
6320
 
6719
 
6321
	if (INTEL_INFO(dev)->gen < 4)
6720
	if (INTEL_INFO(dev)->gen < 4)
6322
		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
6721
		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
6323
 
6722
 
6324
	intel_get_pipe_timings(crtc, pipe_config);
6723
	intel_get_pipe_timings(crtc, pipe_config);
6325
 
6724
 
6326
	i9xx_get_pfit_config(crtc, pipe_config);
6725
	i9xx_get_pfit_config(crtc, pipe_config);
6327
 
6726
 
6328
	if (INTEL_INFO(dev)->gen >= 4) {
6727
	if (INTEL_INFO(dev)->gen >= 4) {
6329
		tmp = I915_READ(DPLL_MD(crtc->pipe));
6728
		tmp = I915_READ(DPLL_MD(crtc->pipe));
6330
		pipe_config->pixel_multiplier =
6729
		pipe_config->pixel_multiplier =
6331
			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
6730
			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
6332
			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
6731
			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
6333
		pipe_config->dpll_hw_state.dpll_md = tmp;
6732
		pipe_config->dpll_hw_state.dpll_md = tmp;
6334
	} else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
6733
	} else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
6335
		tmp = I915_READ(DPLL(crtc->pipe));
6734
		tmp = I915_READ(DPLL(crtc->pipe));
6336
		pipe_config->pixel_multiplier =
6735
		pipe_config->pixel_multiplier =
6337
			((tmp & SDVO_MULTIPLIER_MASK)
6736
			((tmp & SDVO_MULTIPLIER_MASK)
6338
			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
6737
			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
6339
	} else {
6738
	} else {
6340
		/* Note that on i915G/GM the pixel multiplier is in the sdvo
6739
		/* Note that on i915G/GM the pixel multiplier is in the sdvo
6341
		 * port and will be fixed up in the encoder->get_config
6740
		 * port and will be fixed up in the encoder->get_config
6342
		 * function. */
6741
		 * function. */
6343
		pipe_config->pixel_multiplier = 1;
6742
		pipe_config->pixel_multiplier = 1;
6344
	}
6743
	}
6345
	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
6744
	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
6346
	if (!IS_VALLEYVIEW(dev)) {
6745
	if (!IS_VALLEYVIEW(dev)) {
-
 
6746
		/*
-
 
6747
		 * DPLL_DVO_2X_MODE must be enabled for both DPLLs
-
 
6748
		 * on 830. Filter it out here so that we don't
-
 
6749
		 * report errors due to that.
-
 
6750
		 */
-
 
6751
		if (IS_I830(dev))
-
 
6752
			pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE;
-
 
6753
 
6347
		pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
6754
		pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
6348
		pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
6755
		pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
6349
	} else {
6756
	} else {
6350
		/* Mask out read-only status bits. */
6757
		/* Mask out read-only status bits. */
6351
		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
6758
		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
6352
						     DPLL_PORTC_READY_MASK |
6759
						     DPLL_PORTC_READY_MASK |
6353
						     DPLL_PORTB_READY_MASK);
6760
						     DPLL_PORTB_READY_MASK);
6354
	}
6761
	}
6355
 
6762
 
6356
	if (IS_CHERRYVIEW(dev))
6763
	if (IS_CHERRYVIEW(dev))
6357
		chv_crtc_clock_get(crtc, pipe_config);
6764
		chv_crtc_clock_get(crtc, pipe_config);
6358
	else if (IS_VALLEYVIEW(dev))
6765
	else if (IS_VALLEYVIEW(dev))
6359
		vlv_crtc_clock_get(crtc, pipe_config);
6766
		vlv_crtc_clock_get(crtc, pipe_config);
6360
	else
6767
	else
6361
		i9xx_crtc_clock_get(crtc, pipe_config);
6768
		i9xx_crtc_clock_get(crtc, pipe_config);
6362
 
6769
 
6363
	return true;
6770
	return true;
6364
}
6771
}
6365
 
6772
 
6366
static void ironlake_init_pch_refclk(struct drm_device *dev)
6773
static void ironlake_init_pch_refclk(struct drm_device *dev)
6367
{
6774
{
6368
	struct drm_i915_private *dev_priv = dev->dev_private;
6775
	struct drm_i915_private *dev_priv = dev->dev_private;
6369
	struct drm_mode_config *mode_config = &dev->mode_config;
-
 
6370
	struct intel_encoder *encoder;
6776
	struct intel_encoder *encoder;
6371
	u32 val, final;
6777
	u32 val, final;
6372
	bool has_lvds = false;
6778
	bool has_lvds = false;
6373
	bool has_cpu_edp = false;
6779
	bool has_cpu_edp = false;
6374
	bool has_panel = false;
6780
	bool has_panel = false;
6375
	bool has_ck505 = false;
6781
	bool has_ck505 = false;
6376
	bool can_ssc = false;
6782
	bool can_ssc = false;
6377
 
6783
 
6378
	/* We need to take the global config into account */
6784
	/* We need to take the global config into account */
6379
		list_for_each_entry(encoder, &mode_config->encoder_list,
6785
	for_each_intel_encoder(dev, encoder) {
6380
				    base.head) {
-
 
6381
			switch (encoder->type) {
6786
			switch (encoder->type) {
6382
			case INTEL_OUTPUT_LVDS:
6787
			case INTEL_OUTPUT_LVDS:
6383
			has_panel = true;
6788
			has_panel = true;
6384
				has_lvds = true;
6789
				has_lvds = true;
6385
			break;
6790
			break;
6386
			case INTEL_OUTPUT_EDP:
6791
			case INTEL_OUTPUT_EDP:
6387
			has_panel = true;
6792
			has_panel = true;
6388
			if (enc_to_dig_port(&encoder->base)->port == PORT_A)
6793
			if (enc_to_dig_port(&encoder->base)->port == PORT_A)
6389
				has_cpu_edp = true;
6794
				has_cpu_edp = true;
6390
				break;
6795
				break;
-
 
6796
		default:
-
 
6797
			break;
6391
			}
6798
			}
6392
		}
6799
		}
6393
 
6800
 
6394
	if (HAS_PCH_IBX(dev)) {
6801
	if (HAS_PCH_IBX(dev)) {
6395
		has_ck505 = dev_priv->vbt.display_clock_mode;
6802
		has_ck505 = dev_priv->vbt.display_clock_mode;
6396
		can_ssc = has_ck505;
6803
		can_ssc = has_ck505;
6397
	} else {
6804
	} else {
6398
		has_ck505 = false;
6805
		has_ck505 = false;
6399
		can_ssc = true;
6806
		can_ssc = true;
6400
	}
6807
	}
6401
 
6808
 
6402
	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n",
6809
	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n",
6403
		      has_panel, has_lvds, has_ck505);
6810
		      has_panel, has_lvds, has_ck505);
6404
 
6811
 
6405
	/* Ironlake: try to set up display ref clock before DPLL
6812
	/* Ironlake: try to set up display ref clock before DPLL
6406
	 * enabling. This is only under driver's control after
6813
	 * enabling. This is only under driver's control after
6407
	 * PCH B stepping, previous chipset stepping should be
6814
	 * PCH B stepping, previous chipset stepping should be
6408
	 * ignoring this setting.
6815
	 * ignoring this setting.
6409
	 */
6816
	 */
6410
	val = I915_READ(PCH_DREF_CONTROL);
6817
	val = I915_READ(PCH_DREF_CONTROL);
6411
 
6818
 
6412
	/* As we must carefully and slowly disable/enable each source in turn,
6819
	/* As we must carefully and slowly disable/enable each source in turn,
6413
	 * compute the final state we want first and check if we need to
6820
	 * compute the final state we want first and check if we need to
6414
	 * make any changes at all.
6821
	 * make any changes at all.
6415
	 */
6822
	 */
6416
	final = val;
6823
	final = val;
6417
	final &= ~DREF_NONSPREAD_SOURCE_MASK;
6824
	final &= ~DREF_NONSPREAD_SOURCE_MASK;
6418
	if (has_ck505)
6825
	if (has_ck505)
6419
		final |= DREF_NONSPREAD_CK505_ENABLE;
6826
		final |= DREF_NONSPREAD_CK505_ENABLE;
6420
	else
6827
	else
6421
		final |= DREF_NONSPREAD_SOURCE_ENABLE;
6828
		final |= DREF_NONSPREAD_SOURCE_ENABLE;
6422
 
6829
 
6423
	final &= ~DREF_SSC_SOURCE_MASK;
6830
	final &= ~DREF_SSC_SOURCE_MASK;
6424
	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
6831
	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
6425
	final &= ~DREF_SSC1_ENABLE;
6832
	final &= ~DREF_SSC1_ENABLE;
6426
 
6833
 
6427
	if (has_panel) {
6834
	if (has_panel) {
6428
		final |= DREF_SSC_SOURCE_ENABLE;
6835
		final |= DREF_SSC_SOURCE_ENABLE;
6429
 
6836
 
6430
		if (intel_panel_use_ssc(dev_priv) && can_ssc)
6837
		if (intel_panel_use_ssc(dev_priv) && can_ssc)
6431
			final |= DREF_SSC1_ENABLE;
6838
			final |= DREF_SSC1_ENABLE;
6432
 
6839
 
6433
		if (has_cpu_edp) {
6840
		if (has_cpu_edp) {
6434
			if (intel_panel_use_ssc(dev_priv) && can_ssc)
6841
			if (intel_panel_use_ssc(dev_priv) && can_ssc)
6435
				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
6842
				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
6436
			else
6843
			else
6437
				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
6844
				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
6438
		} else
6845
		} else
6439
			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
6846
			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
6440
	} else {
6847
	} else {
6441
		final |= DREF_SSC_SOURCE_DISABLE;
6848
		final |= DREF_SSC_SOURCE_DISABLE;
6442
		final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
6849
		final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
6443
	}
6850
	}
6444
 
6851
 
6445
	if (final == val)
6852
	if (final == val)
6446
		return;
6853
		return;
6447
 
6854
 
6448
	/* Always enable nonspread source */
6855
	/* Always enable nonspread source */
6449
	val &= ~DREF_NONSPREAD_SOURCE_MASK;
6856
	val &= ~DREF_NONSPREAD_SOURCE_MASK;
6450
 
6857
 
6451
	if (has_ck505)
6858
	if (has_ck505)
6452
		val |= DREF_NONSPREAD_CK505_ENABLE;
6859
		val |= DREF_NONSPREAD_CK505_ENABLE;
6453
	else
6860
	else
6454
		val |= DREF_NONSPREAD_SOURCE_ENABLE;
6861
		val |= DREF_NONSPREAD_SOURCE_ENABLE;
6455
 
6862
 
6456
	if (has_panel) {
6863
	if (has_panel) {
6457
		val &= ~DREF_SSC_SOURCE_MASK;
6864
		val &= ~DREF_SSC_SOURCE_MASK;
6458
		val |= DREF_SSC_SOURCE_ENABLE;
6865
		val |= DREF_SSC_SOURCE_ENABLE;
6459
 
6866
 
6460
		/* SSC must be turned on before enabling the CPU output  */
6867
		/* SSC must be turned on before enabling the CPU output  */
6461
		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
6868
		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
6462
			DRM_DEBUG_KMS("Using SSC on panel\n");
6869
			DRM_DEBUG_KMS("Using SSC on panel\n");
6463
			val |= DREF_SSC1_ENABLE;
6870
			val |= DREF_SSC1_ENABLE;
6464
		} else
6871
		} else
6465
			val &= ~DREF_SSC1_ENABLE;
6872
			val &= ~DREF_SSC1_ENABLE;
6466
 
6873
 
6467
		/* Get SSC going before enabling the outputs */
6874
		/* Get SSC going before enabling the outputs */
6468
		I915_WRITE(PCH_DREF_CONTROL, val);
6875
		I915_WRITE(PCH_DREF_CONTROL, val);
6469
			POSTING_READ(PCH_DREF_CONTROL);
6876
			POSTING_READ(PCH_DREF_CONTROL);
6470
			udelay(200);
6877
			udelay(200);
6471
 
6878
 
6472
		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
6879
		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
6473
 
6880
 
6474
		/* Enable CPU source on CPU attached eDP */
6881
		/* Enable CPU source on CPU attached eDP */
6475
		if (has_cpu_edp) {
6882
		if (has_cpu_edp) {
6476
			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
6883
			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
6477
				DRM_DEBUG_KMS("Using SSC on eDP\n");
6884
				DRM_DEBUG_KMS("Using SSC on eDP\n");
6478
				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
6885
				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
6479
			} else
6886
			} else
6480
				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
6887
				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
6481
		} else
6888
		} else
6482
			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
6889
			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
6483
 
6890
 
6484
		I915_WRITE(PCH_DREF_CONTROL, val);
6891
		I915_WRITE(PCH_DREF_CONTROL, val);
6485
		POSTING_READ(PCH_DREF_CONTROL);
6892
		POSTING_READ(PCH_DREF_CONTROL);
6486
		udelay(200);
6893
		udelay(200);
6487
		} else {
6894
		} else {
6488
		DRM_DEBUG_KMS("Disabling SSC entirely\n");
6895
		DRM_DEBUG_KMS("Disabling SSC entirely\n");
6489
 
6896
 
6490
		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
6897
		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
6491
 
6898
 
6492
		/* Turn off CPU output */
6899
		/* Turn off CPU output */
6493
		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
6900
		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
6494
 
6901
 
6495
		I915_WRITE(PCH_DREF_CONTROL, val);
6902
		I915_WRITE(PCH_DREF_CONTROL, val);
6496
		POSTING_READ(PCH_DREF_CONTROL);
6903
		POSTING_READ(PCH_DREF_CONTROL);
6497
		udelay(200);
6904
		udelay(200);
6498
 
6905
 
6499
		/* Turn off the SSC source */
6906
		/* Turn off the SSC source */
6500
		val &= ~DREF_SSC_SOURCE_MASK;
6907
		val &= ~DREF_SSC_SOURCE_MASK;
6501
		val |= DREF_SSC_SOURCE_DISABLE;
6908
		val |= DREF_SSC_SOURCE_DISABLE;
6502
 
6909
 
6503
		/* Turn off SSC1 */
6910
		/* Turn off SSC1 */
6504
		val &= ~DREF_SSC1_ENABLE;
6911
		val &= ~DREF_SSC1_ENABLE;
6505
 
6912
 
6506
		I915_WRITE(PCH_DREF_CONTROL, val);
6913
		I915_WRITE(PCH_DREF_CONTROL, val);
6507
		POSTING_READ(PCH_DREF_CONTROL);
6914
		POSTING_READ(PCH_DREF_CONTROL);
6508
		udelay(200);
6915
		udelay(200);
6509
	}
6916
	}
6510
 
6917
 
6511
	BUG_ON(val != final);
6918
	BUG_ON(val != final);
6512
}
6919
}
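
ironlake_init_pch_refclk() above first computes the desired PCH_DREF_CONTROL value ("final"), returns early if the hardware already matches, and then steps through each transition with a write, a posting read, and a ~200us delay. A condensed sketch of that write-and-settle step (I915_WRITE, POSTING_READ and udelay are the driver helpers used above; the wrapper name is illustrative):

/* Hedged sketch of the settle pattern used above: post each
 * PCH_DREF_CONTROL update and give it time before the next change. */
static void pch_dref_write_and_settle(struct drm_i915_private *dev_priv, u32 val)
{
	I915_WRITE(PCH_DREF_CONTROL, val);
	POSTING_READ(PCH_DREF_CONTROL);
	udelay(200);
}
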
6513
 
6920
 
6514
static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
6921
static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
6515
{
6922
{
6516
	uint32_t tmp;
6923
	uint32_t tmp;
6517
 
6924
 
6518
		tmp = I915_READ(SOUTH_CHICKEN2);
6925
		tmp = I915_READ(SOUTH_CHICKEN2);
6519
		tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
6926
		tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
6520
		I915_WRITE(SOUTH_CHICKEN2, tmp);
6927
		I915_WRITE(SOUTH_CHICKEN2, tmp);
6521
 
6928
 
6522
		if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
6929
		if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
6523
				       FDI_MPHY_IOSFSB_RESET_STATUS, 100))
6930
				       FDI_MPHY_IOSFSB_RESET_STATUS, 100))
6524
			DRM_ERROR("FDI mPHY reset assert timeout\n");
6931
			DRM_ERROR("FDI mPHY reset assert timeout\n");
6525
 
6932
 
6526
		tmp = I915_READ(SOUTH_CHICKEN2);
6933
		tmp = I915_READ(SOUTH_CHICKEN2);
6527
		tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
6934
		tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
6528
		I915_WRITE(SOUTH_CHICKEN2, tmp);
6935
		I915_WRITE(SOUTH_CHICKEN2, tmp);
6529
 
6936
 
6530
		if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
6937
		if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
6531
				FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
6938
				FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
6532
			DRM_ERROR("FDI mPHY reset de-assert timeout\n");
6939
			DRM_ERROR("FDI mPHY reset de-assert timeout\n");
6533
}
6940
}
6534
 
6941
 
6535
/* WaMPhyProgramming:hsw */
6942
/* WaMPhyProgramming:hsw */
6536
static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
6943
static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
6537
{
6944
{
6538
	uint32_t tmp;
6945
	uint32_t tmp;
6539
 
6946
 
6540
	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
6947
	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
6541
	tmp &= ~(0xFF << 24);
6948
	tmp &= ~(0xFF << 24);
6542
	tmp |= (0x12 << 24);
6949
	tmp |= (0x12 << 24);
6543
	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
6950
	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
6544
 
6951
 
6545
	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
6952
	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
6546
	tmp |= (1 << 11);
6953
	tmp |= (1 << 11);
6547
	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
6954
	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
6548
 
6955
 
6549
	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
6956
	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
6550
	tmp |= (1 << 11);
6957
	tmp |= (1 << 11);
6551
	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
6958
	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
6552
 
6959
 
6553
	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
6960
	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
6554
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
6961
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
6555
	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
6962
	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
6556
 
6963
 
6557
	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
6964
	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
6558
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
6965
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
6559
	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
6966
	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
6560
 
6967
 
6561
		tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
6968
		tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
6562
		tmp &= ~(7 << 13);
6969
		tmp &= ~(7 << 13);
6563
		tmp |= (5 << 13);
6970
		tmp |= (5 << 13);
6564
		intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
6971
		intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
6565
 
6972
 
6566
		tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
6973
		tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
6567
		tmp &= ~(7 << 13);
6974
		tmp &= ~(7 << 13);
6568
		tmp |= (5 << 13);
6975
		tmp |= (5 << 13);
6569
		intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
6976
		intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
6570
 
6977
 
6571
	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
6978
	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
6572
	tmp &= ~0xFF;
6979
	tmp &= ~0xFF;
6573
	tmp |= 0x1C;
6980
	tmp |= 0x1C;
6574
	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
6981
	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
6575
 
6982
 
6576
	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
6983
	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
6577
	tmp &= ~0xFF;
6984
	tmp &= ~0xFF;
6578
	tmp |= 0x1C;
6985
	tmp |= 0x1C;
6579
	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
6986
	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
6580
 
6987
 
6581
	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
6988
	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
6582
	tmp &= ~(0xFF << 16);
6989
	tmp &= ~(0xFF << 16);
6583
	tmp |= (0x1C << 16);
6990
	tmp |= (0x1C << 16);
6584
	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
6991
	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
6585
 
6992
 
6586
	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
6993
	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
6587
	tmp &= ~(0xFF << 16);
6994
	tmp &= ~(0xFF << 16);
6588
	tmp |= (0x1C << 16);
6995
	tmp |= (0x1C << 16);
6589
	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
6996
	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
6590
 
6997
 
6591
		tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
6998
		tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
6592
		tmp |= (1 << 27);
6999
		tmp |= (1 << 27);
6593
		intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
7000
		intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
6594
 
7001
 
6595
		tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
7002
		tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
6596
		tmp |= (1 << 27);
7003
		tmp |= (1 << 27);
6597
		intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
7004
		intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
6598
 
7005
 
6599
		tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
7006
		tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
6600
		tmp &= ~(0xF << 28);
7007
		tmp &= ~(0xF << 28);
6601
		tmp |= (4 << 28);
7008
		tmp |= (4 << 28);
6602
		intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
7009
		intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
6603
 
7010
 
6604
		tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
7011
		tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
6605
		tmp &= ~(0xF << 28);
7012
		tmp &= ~(0xF << 28);
6606
		tmp |= (4 << 28);
7013
		tmp |= (4 << 28);
6607
		intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
7014
		intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
6608
}
7015
}
6609
 
7016
 
6610
/* Implements 3 different sequences from BSpec chapter "Display iCLK
7017
/* Implements 3 different sequences from BSpec chapter "Display iCLK
6611
 * Programming" based on the parameters passed:
7018
 * Programming" based on the parameters passed:
6612
 * - Sequence to enable CLKOUT_DP
7019
 * - Sequence to enable CLKOUT_DP
6613
 * - Sequence to enable CLKOUT_DP without spread
7020
 * - Sequence to enable CLKOUT_DP without spread
6614
 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
7021
 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
6615
 */
7022
 */
6616
static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
7023
static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
6617
				 bool with_fdi)
7024
				 bool with_fdi)
6618
{
7025
{
6619
	struct drm_i915_private *dev_priv = dev->dev_private;
7026
	struct drm_i915_private *dev_priv = dev->dev_private;
6620
	uint32_t reg, tmp;
7027
	uint32_t reg, tmp;
6621
 
7028
 
6622
	if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
7029
	if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
6623
		with_spread = true;
7030
		with_spread = true;
6624
	if (WARN(dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE &&
7031
	if (WARN(dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE &&
6625
		 with_fdi, "LP PCH doesn't have FDI\n"))
7032
		 with_fdi, "LP PCH doesn't have FDI\n"))
6626
		with_fdi = false;
7033
		with_fdi = false;
6627
 
7034
 
6628
	mutex_lock(&dev_priv->dpio_lock);
7035
	mutex_lock(&dev_priv->dpio_lock);
6629
 
7036
 
6630
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
7037
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
6631
	tmp &= ~SBI_SSCCTL_DISABLE;
7038
	tmp &= ~SBI_SSCCTL_DISABLE;
6632
	tmp |= SBI_SSCCTL_PATHALT;
7039
	tmp |= SBI_SSCCTL_PATHALT;
6633
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
7040
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
6634
 
7041
 
6635
	udelay(24);
7042
	udelay(24);
6636
 
7043
 
6637
	if (with_spread) {
7044
	if (with_spread) {
6638
		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
7045
		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
6639
		tmp &= ~SBI_SSCCTL_PATHALT;
7046
		tmp &= ~SBI_SSCCTL_PATHALT;
6640
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
7047
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
6641
 
7048
 
6642
		if (with_fdi) {
7049
		if (with_fdi) {
6643
			lpt_reset_fdi_mphy(dev_priv);
7050
			lpt_reset_fdi_mphy(dev_priv);
6644
			lpt_program_fdi_mphy(dev_priv);
7051
			lpt_program_fdi_mphy(dev_priv);
6645
		}
7052
		}
6646
	}
7053
	}
6647
 
7054
 
6648
	reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ?
7055
	reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ?
6649
	       SBI_GEN0 : SBI_DBUFF0;
7056
	       SBI_GEN0 : SBI_DBUFF0;
6650
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
7057
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
6651
	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
7058
	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
6652
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
7059
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
6653
 
7060
 
6654
	mutex_unlock(&dev_priv->dpio_lock);
7061
	mutex_unlock(&dev_priv->dpio_lock);
6655
}
7062
}
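
For reference, a minimal usage sketch of how the three sequences listed in the comment above map onto the two flags. This is only an illustration of the flag combinations; the function itself forces with_spread whenever with_fdi is set (see the WARN at its top), and the real caller below is lpt_init_pch_refclk.

	/* Sketch only: the three meaningful flag combinations. */
	lpt_enable_clkout_dp(dev, true, true);   /* CLKOUT_DP with spread, plus PCH FDI I/O programming */
	lpt_enable_clkout_dp(dev, true, false);  /* CLKOUT_DP with spread */
	lpt_enable_clkout_dp(dev, false, false); /* CLKOUT_DP without spread */
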
6656
 
7063
 
6657
/* Sequence to disable CLKOUT_DP */
7064
/* Sequence to disable CLKOUT_DP */
6658
static void lpt_disable_clkout_dp(struct drm_device *dev)
7065
static void lpt_disable_clkout_dp(struct drm_device *dev)
6659
{
7066
{
6660
	struct drm_i915_private *dev_priv = dev->dev_private;
7067
	struct drm_i915_private *dev_priv = dev->dev_private;
6661
	uint32_t reg, tmp;
7068
	uint32_t reg, tmp;
6662
 
7069
 
6663
	mutex_lock(&dev_priv->dpio_lock);
7070
	mutex_lock(&dev_priv->dpio_lock);
6664
 
7071
 
6665
	reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ?
7072
	reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ?
6666
	       SBI_GEN0 : SBI_DBUFF0;
7073
	       SBI_GEN0 : SBI_DBUFF0;
6667
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
7074
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
6668
	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
7075
	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
6669
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
7076
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
6670
 
7077
 
6671
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
7078
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
6672
	if (!(tmp & SBI_SSCCTL_DISABLE)) {
7079
	if (!(tmp & SBI_SSCCTL_DISABLE)) {
6673
		if (!(tmp & SBI_SSCCTL_PATHALT)) {
7080
		if (!(tmp & SBI_SSCCTL_PATHALT)) {
6674
			tmp |= SBI_SSCCTL_PATHALT;
7081
			tmp |= SBI_SSCCTL_PATHALT;
6675
			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
7082
			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
6676
			udelay(32);
7083
			udelay(32);
6677
		}
7084
		}
6678
		tmp |= SBI_SSCCTL_DISABLE;
7085
		tmp |= SBI_SSCCTL_DISABLE;
6679
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
7086
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
6680
	}
7087
	}
6681
 
7088
 
6682
	mutex_unlock(&dev_priv->dpio_lock);
7089
	mutex_unlock(&dev_priv->dpio_lock);
6683
}
7090
}
6684
 
7091
 
6685
static void lpt_init_pch_refclk(struct drm_device *dev)
7092
static void lpt_init_pch_refclk(struct drm_device *dev)
6686
{
7093
{
6687
	struct drm_mode_config *mode_config = &dev->mode_config;
-
 
6688
	struct intel_encoder *encoder;
7094
	struct intel_encoder *encoder;
6689
	bool has_vga = false;
7095
	bool has_vga = false;
6690
 
7096
 
6691
	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
7097
	for_each_intel_encoder(dev, encoder) {
6692
		switch (encoder->type) {
7098
		switch (encoder->type) {
6693
		case INTEL_OUTPUT_ANALOG:
7099
		case INTEL_OUTPUT_ANALOG:
6694
			has_vga = true;
7100
			has_vga = true;
6695
			break;
7101
			break;
-
 
7102
		default:
-
 
7103
			break;
6696
		}
7104
		}
6697
	}
7105
	}
6698
 
7106
 
6699
	if (has_vga)
7107
	if (has_vga)
6700
		lpt_enable_clkout_dp(dev, true, true);
7108
		lpt_enable_clkout_dp(dev, true, true);
6701
	else
7109
	else
6702
		lpt_disable_clkout_dp(dev);
7110
		lpt_disable_clkout_dp(dev);
6703
}
7111
}
6704
 
7112
 
6705
/*
7113
/*
6706
 * Initialize reference clocks when the driver loads
7114
 * Initialize reference clocks when the driver loads
6707
 */
7115
 */
6708
void intel_init_pch_refclk(struct drm_device *dev)
7116
void intel_init_pch_refclk(struct drm_device *dev)
6709
{
7117
{
6710
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
7118
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
6711
		ironlake_init_pch_refclk(dev);
7119
		ironlake_init_pch_refclk(dev);
6712
	else if (HAS_PCH_LPT(dev))
7120
	else if (HAS_PCH_LPT(dev))
6713
		lpt_init_pch_refclk(dev);
7121
		lpt_init_pch_refclk(dev);
6714
}
7122
}
6715
 
7123
 
6716
static int ironlake_get_refclk(struct drm_crtc *crtc)
7124
static int ironlake_get_refclk(struct drm_crtc *crtc)
6717
{
7125
{
6718
	struct drm_device *dev = crtc->dev;
7126
	struct drm_device *dev = crtc->dev;
6719
	struct drm_i915_private *dev_priv = dev->dev_private;
7127
	struct drm_i915_private *dev_priv = dev->dev_private;
6720
	struct intel_encoder *encoder;
7128
	struct intel_encoder *encoder;
6721
	int num_connectors = 0;
7129
	int num_connectors = 0;
6722
	bool is_lvds = false;
7130
	bool is_lvds = false;
6723
 
7131
 
-
 
7132
	for_each_intel_encoder(dev, encoder) {
-
 
7133
		if (encoder->new_crtc != to_intel_crtc(crtc))
-
 
7134
			continue;
6724
	for_each_encoder_on_crtc(dev, crtc, encoder) {
7135
 
6725
		switch (encoder->type) {
7136
		switch (encoder->type) {
6726
		case INTEL_OUTPUT_LVDS:
7137
		case INTEL_OUTPUT_LVDS:
6727
			is_lvds = true;
7138
			is_lvds = true;
6728
			break;
7139
			break;
-
 
7140
		default:
-
 
7141
			break;
6729
		}
7142
		}
6730
		num_connectors++;
7143
		num_connectors++;
6731
	}
7144
	}
6732
 
7145
 
6733
	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
7146
	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
6734
		DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
7147
		DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
6735
			      dev_priv->vbt.lvds_ssc_freq);
7148
			      dev_priv->vbt.lvds_ssc_freq);
6736
		return dev_priv->vbt.lvds_ssc_freq;
7149
		return dev_priv->vbt.lvds_ssc_freq;
6737
	}
7150
	}
6738
 
7151
 
6739
	return 120000;
7152
	return 120000;
6740
}
7153
}
6741
 
7154
 
6742
static void ironlake_set_pipeconf(struct drm_crtc *crtc)
7155
static void ironlake_set_pipeconf(struct drm_crtc *crtc)
6743
{
7156
{
6744
	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
7157
	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
6745
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7158
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6746
	int pipe = intel_crtc->pipe;
7159
	int pipe = intel_crtc->pipe;
6747
	uint32_t val;
7160
	uint32_t val;
6748
 
7161
 
6749
	val = 0;
7162
	val = 0;
6750
 
7163
 
6751
	switch (intel_crtc->config.pipe_bpp) {
7164
	switch (intel_crtc->config.pipe_bpp) {
6752
	case 18:
7165
	case 18:
6753
		val |= PIPECONF_6BPC;
7166
		val |= PIPECONF_6BPC;
6754
		break;
7167
		break;
6755
	case 24:
7168
	case 24:
6756
		val |= PIPECONF_8BPC;
7169
		val |= PIPECONF_8BPC;
6757
		break;
7170
		break;
6758
	case 30:
7171
	case 30:
6759
		val |= PIPECONF_10BPC;
7172
		val |= PIPECONF_10BPC;
6760
		break;
7173
		break;
6761
	case 36:
7174
	case 36:
6762
		val |= PIPECONF_12BPC;
7175
		val |= PIPECONF_12BPC;
6763
		break;
7176
		break;
6764
	default:
7177
	default:
6765
		/* Case prevented by intel_choose_pipe_bpp_dither. */
7178
		/* Case prevented by intel_choose_pipe_bpp_dither. */
6766
		BUG();
7179
		BUG();
6767
	}
7180
	}
6768
 
7181
 
6769
	if (intel_crtc->config.dither)
7182
	if (intel_crtc->config.dither)
6770
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
7183
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
6771
 
7184
 
6772
	if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
7185
	if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
6773
		val |= PIPECONF_INTERLACED_ILK;
7186
		val |= PIPECONF_INTERLACED_ILK;
6774
	else
7187
	else
6775
		val |= PIPECONF_PROGRESSIVE;
7188
		val |= PIPECONF_PROGRESSIVE;
6776
 
7189
 
6777
	if (intel_crtc->config.limited_color_range)
7190
	if (intel_crtc->config.limited_color_range)
6778
		val |= PIPECONF_COLOR_RANGE_SELECT;
7191
		val |= PIPECONF_COLOR_RANGE_SELECT;
6779
 
7192
 
6780
	I915_WRITE(PIPECONF(pipe), val);
7193
	I915_WRITE(PIPECONF(pipe), val);
6781
	POSTING_READ(PIPECONF(pipe));
7194
	POSTING_READ(PIPECONF(pipe));
6782
}
7195
}
6783
 
7196
 
6784
/*
7197
/*
6785
 * Set up the pipe CSC unit.
7198
 * Set up the pipe CSC unit.
6786
 *
7199
 *
6787
 * Currently only full range RGB to limited range RGB conversion
7200
 * Currently only full range RGB to limited range RGB conversion
6788
 * is supported, but eventually this should handle various
7201
 * is supported, but eventually this should handle various
6789
 * RGB<->YCbCr scenarios as well.
7202
 * RGB<->YCbCr scenarios as well.
6790
 */
7203
 */
6791
static void intel_set_pipe_csc(struct drm_crtc *crtc)
7204
static void intel_set_pipe_csc(struct drm_crtc *crtc)
6792
{
7205
{
6793
	struct drm_device *dev = crtc->dev;
7206
	struct drm_device *dev = crtc->dev;
6794
	struct drm_i915_private *dev_priv = dev->dev_private;
7207
	struct drm_i915_private *dev_priv = dev->dev_private;
6795
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7208
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6796
	int pipe = intel_crtc->pipe;
7209
	int pipe = intel_crtc->pipe;
6797
	uint16_t coeff = 0x7800; /* 1.0 */
7210
	uint16_t coeff = 0x7800; /* 1.0 */
6798
 
7211
 
6799
	/*
7212
	/*
6800
	 * TODO: Check what kind of values actually come out of the pipe
7213
	 * TODO: Check what kind of values actually come out of the pipe
6801
	 * with these coeff/postoff values and adjust to get the best
7214
	 * with these coeff/postoff values and adjust to get the best
6802
	 * accuracy. Perhaps we even need to take the bpc value into
7215
	 * accuracy. Perhaps we even need to take the bpc value into
6803
	 * consideration.
7216
	 * consideration.
6804
	 */
7217
	 */
6805
 
7218
 
6806
	if (intel_crtc->config.limited_color_range)
7219
	if (intel_crtc->config.limited_color_range)
6807
		coeff = ((235 - 16) * (1 << 12) / 255) & 0xff8; /* 0.xxx... */
7220
		coeff = ((235 - 16) * (1 << 12) / 255) & 0xff8; /* 0.xxx... */
6808
 
7221
 
6809
	/*
7222
	/*
6810
	 * GY/GU and RY/RU should be the other way around according
7223
	 * GY/GU and RY/RU should be the other way around according
6811
	 * to BSpec, but reality doesn't agree. Just set them up in
7224
	 * to BSpec, but reality doesn't agree. Just set them up in
6812
	 * a way that results in the correct picture.
7225
	 * a way that results in the correct picture.
6813
	 */
7226
	 */
6814
	I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeff << 16);
7227
	I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeff << 16);
6815
	I915_WRITE(PIPE_CSC_COEFF_BY(pipe), 0);
7228
	I915_WRITE(PIPE_CSC_COEFF_BY(pipe), 0);
6816
 
7229
 
6817
	I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeff);
7230
	I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeff);
6818
	I915_WRITE(PIPE_CSC_COEFF_BU(pipe), 0);
7231
	I915_WRITE(PIPE_CSC_COEFF_BU(pipe), 0);
6819
 
7232
 
6820
	I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), 0);
7233
	I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), 0);
6821
	I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeff << 16);
7234
	I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeff << 16);
6822
 
7235
 
6823
	I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0);
7236
	I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0);
6824
	I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
7237
	I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
6825
	I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0);
7238
	I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0);
6826
 
7239
 
6827
	if (INTEL_INFO(dev)->gen > 6) {
7240
	if (INTEL_INFO(dev)->gen > 6) {
6828
		uint16_t postoff = 0;
7241
		uint16_t postoff = 0;
6829
 
7242
 
6830
		if (intel_crtc->config.limited_color_range)
7243
		if (intel_crtc->config.limited_color_range)
6831
			postoff = (16 * (1 << 12) / 255) & 0x1fff;
7244
			postoff = (16 * (1 << 12) / 255) & 0x1fff;
6832
 
7245
 
6833
		I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
7246
		I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
6834
		I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff);
7247
		I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff);
6835
		I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff);
7248
		I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff);
6836
 
7249
 
6837
		I915_WRITE(PIPE_CSC_MODE(pipe), 0);
7250
		I915_WRITE(PIPE_CSC_MODE(pipe), 0);
6838
	} else {
7251
	} else {
6839
		uint32_t mode = CSC_MODE_YUV_TO_RGB;
7252
		uint32_t mode = CSC_MODE_YUV_TO_RGB;
6840
 
7253
 
6841
		if (intel_crtc->config.limited_color_range)
7254
		if (intel_crtc->config.limited_color_range)
6842
			mode |= CSC_BLACK_SCREEN_OFFSET;
7255
			mode |= CSC_BLACK_SCREEN_OFFSET;
6843
 
7256
 
6844
		I915_WRITE(PIPE_CSC_MODE(pipe), mode);
7257
		I915_WRITE(PIPE_CSC_MODE(pipe), mode);
6845
	}
7258
	}
6846
}
7259
}
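
As a sanity check on the constants above, a small standalone sketch (plain user-space C, not driver code) that evaluates the limited-range coefficient and the gen > 6 post-offset exactly as written in intel_set_pipe_csc:

#include <stdio.h>

/* Sketch: reproduce the limited-range CSC constants computed above. */
int main(void)
{
	unsigned int coeff   = ((235 - 16) * (1 << 12) / 255) & 0xff8; /* 0xdb8: the ~219/255 full-to-limited scale */
	unsigned int postoff = (16 * (1 << 12) / 255) & 0x1fff;        /* 0x101: the 16/255 black-level offset */

	printf("coeff=0x%x postoff=0x%x\n", coeff, postoff);
	return 0;
}
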
6847
 
7260
 
6848
static void haswell_set_pipeconf(struct drm_crtc *crtc)
7261
static void haswell_set_pipeconf(struct drm_crtc *crtc)
6849
{
7262
{
6850
	struct drm_device *dev = crtc->dev;
7263
	struct drm_device *dev = crtc->dev;
6851
	struct drm_i915_private *dev_priv = dev->dev_private;
7264
	struct drm_i915_private *dev_priv = dev->dev_private;
6852
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7265
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6853
	enum pipe pipe = intel_crtc->pipe;
7266
	enum pipe pipe = intel_crtc->pipe;
6854
	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
7267
	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
6855
	uint32_t val;
7268
	uint32_t val;
6856
 
7269
 
6857
	val = 0;
7270
	val = 0;
6858
 
7271
 
6859
	if (IS_HASWELL(dev) && intel_crtc->config.dither)
7272
	if (IS_HASWELL(dev) && intel_crtc->config.dither)
6860
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
7273
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
6861
 
7274
 
6862
	if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
7275
	if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
6863
		val |= PIPECONF_INTERLACED_ILK;
7276
		val |= PIPECONF_INTERLACED_ILK;
6864
	else
7277
	else
6865
		val |= PIPECONF_PROGRESSIVE;
7278
		val |= PIPECONF_PROGRESSIVE;
6866
 
7279
 
6867
	I915_WRITE(PIPECONF(cpu_transcoder), val);
7280
	I915_WRITE(PIPECONF(cpu_transcoder), val);
6868
	POSTING_READ(PIPECONF(cpu_transcoder));
7281
	POSTING_READ(PIPECONF(cpu_transcoder));
6869
 
7282
 
6870
	I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT);
7283
	I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT);
6871
	POSTING_READ(GAMMA_MODE(intel_crtc->pipe));
7284
	POSTING_READ(GAMMA_MODE(intel_crtc->pipe));
6872
 
7285
 
6873
	if (IS_BROADWELL(dev)) {
7286
	if (IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
6874
		val = 0;
7287
		val = 0;
6875
 
7288
 
6876
		switch (intel_crtc->config.pipe_bpp) {
7289
		switch (intel_crtc->config.pipe_bpp) {
6877
		case 18:
7290
		case 18:
6878
			val |= PIPEMISC_DITHER_6_BPC;
7291
			val |= PIPEMISC_DITHER_6_BPC;
6879
			break;
7292
			break;
6880
		case 24:
7293
		case 24:
6881
			val |= PIPEMISC_DITHER_8_BPC;
7294
			val |= PIPEMISC_DITHER_8_BPC;
6882
			break;
7295
			break;
6883
		case 30:
7296
		case 30:
6884
			val |= PIPEMISC_DITHER_10_BPC;
7297
			val |= PIPEMISC_DITHER_10_BPC;
6885
			break;
7298
			break;
6886
		case 36:
7299
		case 36:
6887
			val |= PIPEMISC_DITHER_12_BPC;
7300
			val |= PIPEMISC_DITHER_12_BPC;
6888
			break;
7301
			break;
6889
		default:
7302
		default:
6890
			/* Case prevented by pipe_config_set_bpp. */
7303
			/* Case prevented by pipe_config_set_bpp. */
6891
			BUG();
7304
			BUG();
6892
		}
7305
		}
6893
 
7306
 
6894
		if (intel_crtc->config.dither)
7307
		if (intel_crtc->config.dither)
6895
			val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
7308
			val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
6896
 
7309
 
6897
		I915_WRITE(PIPEMISC(pipe), val);
7310
		I915_WRITE(PIPEMISC(pipe), val);
6898
	}
7311
	}
6899
}
7312
}
6900
 
7313
 
6901
static bool ironlake_compute_clocks(struct drm_crtc *crtc,
7314
static bool ironlake_compute_clocks(struct drm_crtc *crtc,
6902
				    intel_clock_t *clock,
7315
				    intel_clock_t *clock,
6903
				    bool *has_reduced_clock,
7316
				    bool *has_reduced_clock,
6904
				    intel_clock_t *reduced_clock)
7317
				    intel_clock_t *reduced_clock)
6905
{
7318
{
6906
	struct drm_device *dev = crtc->dev;
7319
	struct drm_device *dev = crtc->dev;
6907
	struct drm_i915_private *dev_priv = dev->dev_private;
7320
	struct drm_i915_private *dev_priv = dev->dev_private;
6908
	struct intel_encoder *intel_encoder;
7321
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6909
	int refclk;
7322
	int refclk;
6910
	const intel_limit_t *limit;
7323
	const intel_limit_t *limit;
6911
	bool ret, is_lvds = false;
7324
	bool ret, is_lvds = false;
6912
 
7325
 
6913
	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
-
 
6914
		switch (intel_encoder->type) {
-
 
6915
		case INTEL_OUTPUT_LVDS:
-
 
6916
			is_lvds = true;
-
 
6917
			break;
-
 
6918
		}
-
 
6919
	}
7326
	is_lvds = intel_pipe_will_have_type(intel_crtc, INTEL_OUTPUT_LVDS);
6920
 
7327
 
6921
	refclk = ironlake_get_refclk(crtc);
7328
	refclk = ironlake_get_refclk(crtc);
6922
 
7329
 
6923
	/*
7330
	/*
6924
	 * Returns a set of divisors for the desired target clock with the given
7331
	 * Returns a set of divisors for the desired target clock with the given
6925
	 * refclk, or FALSE.  The returned values represent the clock equation:
7332
	 * refclk, or FALSE.  The returned values represent the clock equation:
6926
	 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
7333
	 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
6927
	 */
7334
	 */
6928
	limit = intel_limit(crtc, refclk);
7335
	limit = intel_limit(intel_crtc, refclk);
6929
	ret = dev_priv->display.find_dpll(limit, crtc,
7336
	ret = dev_priv->display.find_dpll(limit, intel_crtc,
6930
					  to_intel_crtc(crtc)->config.port_clock,
7337
					  intel_crtc->new_config->port_clock,
6931
					  refclk, NULL, clock);
7338
					  refclk, NULL, clock);
6932
	if (!ret)
7339
	if (!ret)
6933
		return false;
7340
		return false;
6934
 
7341
 
6935
	if (is_lvds && dev_priv->lvds_downclock_avail) {
7342
	if (is_lvds && dev_priv->lvds_downclock_avail) {
6936
		/*
7343
		/*
6937
		 * Ensure we match the reduced clock's P to the target clock.
7344
		 * Ensure we match the reduced clock's P to the target clock.
6938
		 * If the clocks don't match, we can't switch the display clock
7345
		 * If the clocks don't match, we can't switch the display clock
6939
		 * by using the FP0/FP1. In such a case we will disable the LVDS
7346
		 * by using the FP0/FP1. In such a case we will disable the LVDS
6940
		 * downclock feature.
7347
		 * downclock feature.
6941
		*/
7348
		*/
6942
		*has_reduced_clock =
7349
		*has_reduced_clock =
6943
			dev_priv->display.find_dpll(limit, crtc,
7350
			dev_priv->display.find_dpll(limit, intel_crtc,
6944
						     dev_priv->lvds_downclock,
7351
						     dev_priv->lvds_downclock,
6945
						    refclk, clock,
7352
						    refclk, clock,
6946
						     reduced_clock);
7353
						     reduced_clock);
6947
	}
7354
	}
6948
 
7355
 
6949
	return true;
7356
	return true;
6950
}
7357
}
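
To make the comment's divisor equation concrete, a tiny standalone sketch with made-up divisor values (hypothetical numbers, chosen only to show how the terms combine; real values come from find_dpll):

#include <stdio.h>

/* Sketch: evaluate refclk * (5*(m1+2) + (m2+2)) / (n+2) / p1 / p2. */
int main(void)
{
	unsigned int refclk = 120000;				/* kHz */
	unsigned int n = 3, m1 = 12, m2 = 9, p1 = 2, p2 = 10;	/* hypothetical divisors */
	unsigned int m = 5 * (m1 + 2) + (m2 + 2);		/* 81 */

	printf("dot clock ~ %u kHz\n", refclk * m / (n + 2) / p1 / p2); /* 97200 */
	return 0;
}
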
6951
 
7358
 
6952
int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
7359
int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
6953
{
7360
{
6954
	/*
7361
	/*
6955
	 * Account for spread spectrum to avoid
7362
	 * Account for spread spectrum to avoid
6956
	 * oversubscribing the link. Max center spread
7363
	 * oversubscribing the link. Max center spread
6957
	 * is 2.5%; use 5% for safety's sake.
7364
	 * is 2.5%; use 5% for safety's sake.
6958
	 */
7365
	 */
6959
	u32 bps = target_clock * bpp * 21 / 20;
7366
	u32 bps = target_clock * bpp * 21 / 20;
6960
	return DIV_ROUND_UP(bps, link_bw * 8);
7367
	return DIV_ROUND_UP(bps, link_bw * 8);
6961
}
7368
}
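
A quick worked case for the helper above, as a standalone sketch (hypothetical mode and link numbers; the only requirement is that target_clock and link_bw use the same unit):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Sketch: 148.5 MHz pixel clock, 24 bpp, 270 MHz link, 5% overhead. */
int main(void)
{
	unsigned int target_clock = 148500;	/* kHz, hypothetical */
	unsigned int bpp = 24;
	unsigned int link_bw = 270000;		/* kHz, hypothetical */
	unsigned int bps = target_clock * bpp * 21 / 20;

	printf("lanes = %u\n", DIV_ROUND_UP(bps, link_bw * 8));	/* -> 2 */
	return 0;
}
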
6962
 
7369
 
6963
static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
7370
static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
6964
{
7371
{
6965
	return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
7372
	return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
6966
}
7373
}
6967
 
7374
 
6968
static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
7375
static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
6969
				      u32 *fp,
7376
				      u32 *fp,
6970
				      intel_clock_t *reduced_clock, u32 *fp2)
7377
				      intel_clock_t *reduced_clock, u32 *fp2)
6971
{
7378
{
6972
	struct drm_crtc *crtc = &intel_crtc->base;
7379
	struct drm_crtc *crtc = &intel_crtc->base;
6973
	struct drm_device *dev = crtc->dev;
7380
	struct drm_device *dev = crtc->dev;
6974
	struct drm_i915_private *dev_priv = dev->dev_private;
7381
	struct drm_i915_private *dev_priv = dev->dev_private;
6975
	struct intel_encoder *intel_encoder;
7382
	struct intel_encoder *intel_encoder;
6976
	uint32_t dpll;
7383
	uint32_t dpll;
6977
	int factor, num_connectors = 0;
7384
	int factor, num_connectors = 0;
6978
	bool is_lvds = false, is_sdvo = false;
7385
	bool is_lvds = false, is_sdvo = false;
6979
 
7386
 
-
 
7387
	for_each_intel_encoder(dev, intel_encoder) {
-
 
7388
		if (intel_encoder->new_crtc != to_intel_crtc(crtc))
-
 
7389
			continue;
6980
	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
7390
 
6981
		switch (intel_encoder->type) {
7391
		switch (intel_encoder->type) {
6982
		case INTEL_OUTPUT_LVDS:
7392
		case INTEL_OUTPUT_LVDS:
6983
			is_lvds = true;
7393
			is_lvds = true;
6984
			break;
7394
			break;
6985
		case INTEL_OUTPUT_SDVO:
7395
		case INTEL_OUTPUT_SDVO:
6986
		case INTEL_OUTPUT_HDMI:
7396
		case INTEL_OUTPUT_HDMI:
6987
			is_sdvo = true;
7397
			is_sdvo = true;
6988
			break;
7398
			break;
-
 
7399
		default:
-
 
7400
			break;
6989
		}
7401
		}
6990
 
7402
 
6991
		num_connectors++;
7403
		num_connectors++;
6992
	}
7404
	}
6993
 
7405
 
6994
    /* Enable autotuning of the PLL clock (if permissible) */
7406
    /* Enable autotuning of the PLL clock (if permissible) */
6995
    factor = 21;
7407
    factor = 21;
6996
    if (is_lvds) {
7408
    if (is_lvds) {
6997
        if ((intel_panel_use_ssc(dev_priv) &&
7409
        if ((intel_panel_use_ssc(dev_priv) &&
6998
		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
7410
		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
6999
		    (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
7411
		    (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
7000
            factor = 25;
7412
            factor = 25;
7001
	} else if (intel_crtc->config.sdvo_tv_clock)
7413
	} else if (intel_crtc->new_config->sdvo_tv_clock)
7002
        factor = 20;
7414
        factor = 20;
7003
 
7415
 
7004
	if (ironlake_needs_fb_cb_tune(&intel_crtc->config.dpll, factor))
7416
	if (ironlake_needs_fb_cb_tune(&intel_crtc->new_config->dpll, factor))
7005
		*fp |= FP_CB_TUNE;
7417
		*fp |= FP_CB_TUNE;
7006
 
7418
 
7007
	if (fp2 && (reduced_clock->m < factor * reduced_clock->n))
7419
	if (fp2 && (reduced_clock->m < factor * reduced_clock->n))
7008
		*fp2 |= FP_CB_TUNE;
7420
		*fp2 |= FP_CB_TUNE;
7009
 
7421
 
7010
    dpll = 0;
7422
    dpll = 0;
7011
 
7423
 
7012
    if (is_lvds)
7424
    if (is_lvds)
7013
        dpll |= DPLLB_MODE_LVDS;
7425
        dpll |= DPLLB_MODE_LVDS;
7014
    else
7426
    else
7015
        dpll |= DPLLB_MODE_DAC_SERIAL;
7427
        dpll |= DPLLB_MODE_DAC_SERIAL;
7016
 
7428
 
7017
			dpll |= (intel_crtc->config.pixel_multiplier - 1)
7429
	dpll |= (intel_crtc->new_config->pixel_multiplier - 1)
7018
				<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
7430
				<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
7019
 
7431
 
7020
	if (is_sdvo)
7432
	if (is_sdvo)
7021
		dpll |= DPLL_SDVO_HIGH_SPEED;
7433
		dpll |= DPLL_SDVO_HIGH_SPEED;
7022
	if (intel_crtc->config.has_dp_encoder)
7434
	if (intel_crtc->new_config->has_dp_encoder)
7023
		dpll |= DPLL_SDVO_HIGH_SPEED;
7435
		dpll |= DPLL_SDVO_HIGH_SPEED;
7024
 
7436
 
7025
    /* compute bitmask from p1 value */
7437
    /* compute bitmask from p1 value */
7026
	dpll |= (1 << (intel_crtc->config.dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7438
	dpll |= (1 << (intel_crtc->new_config->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7027
    /* also FPA1 */
7439
    /* also FPA1 */
7028
	dpll |= (1 << (intel_crtc->config.dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
7440
	dpll |= (1 << (intel_crtc->new_config->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
7029
 
7441
 
7030
	switch (intel_crtc->config.dpll.p2) {
7442
	switch (intel_crtc->new_config->dpll.p2) {
7031
    case 5:
7443
    case 5:
7032
        dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
7444
        dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
7033
        break;
7445
        break;
7034
    case 7:
7446
    case 7:
7035
        dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
7447
        dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
7036
        break;
7448
        break;
7037
    case 10:
7449
    case 10:
7038
        dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
7450
        dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
7039
        break;
7451
        break;
7040
    case 14:
7452
    case 14:
7041
        dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
7453
        dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
7042
        break;
7454
        break;
7043
    }
7455
    }
7044
 
7456
 
7045
	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
7457
	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
7046
        dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
7458
        dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
7047
    else
7459
    else
7048
        dpll |= PLL_REF_INPUT_DREFCLK;
7460
        dpll |= PLL_REF_INPUT_DREFCLK;
7049
 
7461
 
7050
	return dpll | DPLL_VCO_ENABLE;
7462
	return dpll | DPLL_VCO_ENABLE;
7051
}
7463
}
7052
 
7464
 
7053
static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
-
 
7054
				  int x, int y,
-
 
7055
				  struct drm_framebuffer *fb)
7465
static int ironlake_crtc_compute_clock(struct intel_crtc *crtc)
7056
{
7466
{
7057
	struct drm_device *dev = crtc->dev;
-
 
7058
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
 
7059
	int num_connectors = 0;
7467
	struct drm_device *dev = crtc->base.dev;
7060
	intel_clock_t clock, reduced_clock;
7468
	intel_clock_t clock, reduced_clock;
7061
	u32 dpll = 0, fp = 0, fp2 = 0;
7469
	u32 dpll = 0, fp = 0, fp2 = 0;
7062
	bool ok, has_reduced_clock = false;
7470
	bool ok, has_reduced_clock = false;
7063
	bool is_lvds = false;
7471
	bool is_lvds = false;
7064
	struct intel_encoder *encoder;
-
 
7065
	struct intel_shared_dpll *pll;
7472
	struct intel_shared_dpll *pll;
7066
 
-
 
7067
	for_each_encoder_on_crtc(dev, crtc, encoder) {
-
 
7068
		switch (encoder->type) {
7473
 
7069
		case INTEL_OUTPUT_LVDS:
-
 
7070
			is_lvds = true;
-
 
7071
			break;
-
 
7072
		}
-
 
7073
 
-
 
7074
		num_connectors++;
-
 
7075
	}
7474
	is_lvds = intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS);
7076
 
7475
 
7077
	WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
7476
	WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
7078
	     "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));
7477
	     "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));
7079
 
7478
 
7080
	ok = ironlake_compute_clocks(crtc, &clock,
7479
	ok = ironlake_compute_clocks(&crtc->base, &clock,
7081
				     &has_reduced_clock, &reduced_clock);
7480
				     &has_reduced_clock, &reduced_clock);
7082
	if (!ok && !intel_crtc->config.clock_set) {
7481
	if (!ok && !crtc->new_config->clock_set) {
7083
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
7482
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
7084
		return -EINVAL;
7483
		return -EINVAL;
7085
	}
7484
	}
7086
	/* Compat-code for transition, will disappear. */
7485
	/* Compat-code for transition, will disappear. */
7087
	if (!intel_crtc->config.clock_set) {
7486
	if (!crtc->new_config->clock_set) {
7088
		intel_crtc->config.dpll.n = clock.n;
7487
		crtc->new_config->dpll.n = clock.n;
7089
		intel_crtc->config.dpll.m1 = clock.m1;
7488
		crtc->new_config->dpll.m1 = clock.m1;
7090
		intel_crtc->config.dpll.m2 = clock.m2;
7489
		crtc->new_config->dpll.m2 = clock.m2;
7091
		intel_crtc->config.dpll.p1 = clock.p1;
7490
		crtc->new_config->dpll.p1 = clock.p1;
7092
		intel_crtc->config.dpll.p2 = clock.p2;
7491
		crtc->new_config->dpll.p2 = clock.p2;
7093
	}
7492
	}
7094
 
7493
 
7095
	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
7494
	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
7096
	if (intel_crtc->config.has_pch_encoder) {
7495
	if (crtc->new_config->has_pch_encoder) {
7097
		fp = i9xx_dpll_compute_fp(&intel_crtc->config.dpll);
7496
		fp = i9xx_dpll_compute_fp(&crtc->new_config->dpll);
7098
	if (has_reduced_clock)
7497
	if (has_reduced_clock)
7099
			fp2 = i9xx_dpll_compute_fp(&reduced_clock);
7498
			fp2 = i9xx_dpll_compute_fp(&reduced_clock);
7100
 
7499
 
7101
		dpll = ironlake_compute_dpll(intel_crtc,
7500
		dpll = ironlake_compute_dpll(crtc,
7102
					     &fp, &reduced_clock,
7501
					     &fp, &reduced_clock,
7103
					     has_reduced_clock ? &fp2 : NULL);
7502
					     has_reduced_clock ? &fp2 : NULL);
7104
 
7503
 
7105
		intel_crtc->config.dpll_hw_state.dpll = dpll;
7504
		crtc->new_config->dpll_hw_state.dpll = dpll;
7106
		intel_crtc->config.dpll_hw_state.fp0 = fp;
7505
		crtc->new_config->dpll_hw_state.fp0 = fp;
7107
		if (has_reduced_clock)
7506
		if (has_reduced_clock)
7108
			intel_crtc->config.dpll_hw_state.fp1 = fp2;
7507
			crtc->new_config->dpll_hw_state.fp1 = fp2;
7109
		else
7508
		else
7110
			intel_crtc->config.dpll_hw_state.fp1 = fp;
7509
			crtc->new_config->dpll_hw_state.fp1 = fp;
7111
 
7510
 
7112
		pll = intel_get_shared_dpll(intel_crtc);
7511
		pll = intel_get_shared_dpll(crtc);
7113
		if (pll == NULL) {
7512
		if (pll == NULL) {
7114
			DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
7513
			DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
7115
					 pipe_name(intel_crtc->pipe));
7514
					 pipe_name(crtc->pipe));
7116
			return -EINVAL;
7515
			return -EINVAL;
7117
        }
-
 
7118
	} else
7516
        }
7119
		intel_put_shared_dpll(intel_crtc);
7517
	}
7120
 
7518
 
7121
	if (is_lvds && has_reduced_clock && i915.powersave)
7519
	if (is_lvds && has_reduced_clock && i915.powersave)
7122
		intel_crtc->lowfreq_avail = true;
7520
		crtc->lowfreq_avail = true;
7123
	else
7521
	else
7124
		intel_crtc->lowfreq_avail = false;
7522
		crtc->lowfreq_avail = false;
7125
 
7523
 
7126
	return 0;
7524
	return 0;
7127
}
7525
}
7128
 
7526
 
7129
static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
7527
static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
7130
					 struct intel_link_m_n *m_n)
7528
					 struct intel_link_m_n *m_n)
7131
{
7529
{
7132
	struct drm_device *dev = crtc->base.dev;
7530
	struct drm_device *dev = crtc->base.dev;
7133
	struct drm_i915_private *dev_priv = dev->dev_private;
7531
	struct drm_i915_private *dev_priv = dev->dev_private;
7134
	enum pipe pipe = crtc->pipe;
7532
	enum pipe pipe = crtc->pipe;
7135
 
7533
 
7136
	m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
7534
	m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
7137
	m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
7535
	m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
7138
	m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
7536
	m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
7139
		& ~TU_SIZE_MASK;
7537
		& ~TU_SIZE_MASK;
7140
	m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
7538
	m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
7141
	m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
7539
	m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
7142
		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
7540
		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
7143
}
7541
}
7144
 
7542
 
7145
static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
7543
static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
7146
					 enum transcoder transcoder,
7544
					 enum transcoder transcoder,
7147
					 struct intel_link_m_n *m_n)
7545
					 struct intel_link_m_n *m_n,
-
 
7546
					 struct intel_link_m_n *m2_n2)
7148
{
7547
{
7149
	struct drm_device *dev = crtc->base.dev;
7548
	struct drm_device *dev = crtc->base.dev;
7150
	struct drm_i915_private *dev_priv = dev->dev_private;
7549
	struct drm_i915_private *dev_priv = dev->dev_private;
7151
	enum pipe pipe = crtc->pipe;
7550
	enum pipe pipe = crtc->pipe;
7152
 
7551
 
7153
	if (INTEL_INFO(dev)->gen >= 5) {
7552
	if (INTEL_INFO(dev)->gen >= 5) {
7154
		m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
7553
		m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
7155
		m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
7554
		m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
7156
		m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
7555
		m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
7157
					& ~TU_SIZE_MASK;
7556
					& ~TU_SIZE_MASK;
7158
		m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
7557
		m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
7159
		m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
7558
		m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
7160
				   & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
7559
				   & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
-
 
7560
		/* Read M2_N2 registers only for gen < 8 (M2_N2 available for
-
 
7561
		 * gen < 8) and if DRRS is supported (to make sure the
-
 
7562
		 * registers are not unnecessarily read).
-
 
7563
		 */
-
 
7564
		if (m2_n2 && INTEL_INFO(dev)->gen < 8 &&
-
 
7565
			crtc->config.has_drrs) {
-
 
7566
			m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
-
 
7567
			m2_n2->link_n =	I915_READ(PIPE_LINK_N2(transcoder));
-
 
7568
			m2_n2->gmch_m =	I915_READ(PIPE_DATA_M2(transcoder))
-
 
7569
					& ~TU_SIZE_MASK;
-
 
7570
			m2_n2->gmch_n =	I915_READ(PIPE_DATA_N2(transcoder));
-
 
7571
			m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
-
 
7572
					& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
-
 
7573
		}
7161
	} else {
7574
	} else {
7162
		m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
7575
		m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
7163
		m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
7576
		m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
7164
		m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
7577
		m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
7165
			& ~TU_SIZE_MASK;
7578
			& ~TU_SIZE_MASK;
7166
		m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
7579
		m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
7167
		m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
7580
		m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
7168
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
7581
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
7169
	}
7582
	}
7170
}
7583
}
7171
 
7584
 
7172
void intel_dp_get_m_n(struct intel_crtc *crtc,
7585
void intel_dp_get_m_n(struct intel_crtc *crtc,
7173
		      struct intel_crtc_config *pipe_config)
7586
		      struct intel_crtc_config *pipe_config)
7174
{
7587
{
7175
	if (crtc->config.has_pch_encoder)
7588
	if (crtc->config.has_pch_encoder)
7176
		intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
7589
		intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
7177
	else
7590
	else
7178
		intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
7591
		intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
-
 
7592
					     &pipe_config->dp_m_n,
7179
					     &pipe_config->dp_m_n);
7593
					     &pipe_config->dp_m2_n2);
7180
}
7594
}
7181
 
7595
 
7182
static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
7596
static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
7183
					struct intel_crtc_config *pipe_config)
7597
					struct intel_crtc_config *pipe_config)
7184
{
7598
{
7185
	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
7599
	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
7186
				     &pipe_config->fdi_m_n);
7600
				     &pipe_config->fdi_m_n, NULL);
-
 
7601
}
-
 
7602
 
-
 
7603
static void skylake_get_pfit_config(struct intel_crtc *crtc,
-
 
7604
				    struct intel_crtc_config *pipe_config)
-
 
7605
{
-
 
7606
	struct drm_device *dev = crtc->base.dev;
-
 
7607
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
7608
	uint32_t tmp;
-
 
7609
 
-
 
7610
	tmp = I915_READ(PS_CTL(crtc->pipe));
-
 
7611
 
-
 
7612
	if (tmp & PS_ENABLE) {
-
 
7613
		pipe_config->pch_pfit.enabled = true;
-
 
7614
		pipe_config->pch_pfit.pos = I915_READ(PS_WIN_POS(crtc->pipe));
-
 
7615
		pipe_config->pch_pfit.size = I915_READ(PS_WIN_SZ(crtc->pipe));
-
 
7616
	}
7187
}
7617
}
7188
 
7618
 
7189
static void ironlake_get_pfit_config(struct intel_crtc *crtc,
7619
static void ironlake_get_pfit_config(struct intel_crtc *crtc,
7190
				     struct intel_crtc_config *pipe_config)
7620
				     struct intel_crtc_config *pipe_config)
7191
{
7621
{
7192
	struct drm_device *dev = crtc->base.dev;
7622
	struct drm_device *dev = crtc->base.dev;
7193
	struct drm_i915_private *dev_priv = dev->dev_private;
7623
	struct drm_i915_private *dev_priv = dev->dev_private;
7194
	uint32_t tmp;
7624
	uint32_t tmp;
7195
 
7625
 
7196
	tmp = I915_READ(PF_CTL(crtc->pipe));
7626
	tmp = I915_READ(PF_CTL(crtc->pipe));
7197
 
7627
 
7198
	if (tmp & PF_ENABLE) {
7628
	if (tmp & PF_ENABLE) {
7199
		pipe_config->pch_pfit.enabled = true;
7629
		pipe_config->pch_pfit.enabled = true;
7200
		pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
7630
		pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
7201
		pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
7631
		pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
7202
 
7632
 
7203
		/* We currently do not free assignments of panel fitters on
7633
		/* We currently do not free assignments of panel fitters on
7204
		 * ivb/hsw (since we don't use the higher upscaling modes which
7634
		 * ivb/hsw (since we don't use the higher upscaling modes which
7205
		 * differentiate them) so just WARN about this case for now. */
7635
		 * differentiate them) so just WARN about this case for now. */
7206
		if (IS_GEN7(dev)) {
7636
		if (IS_GEN7(dev)) {
7207
			WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
7637
			WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
7208
				PF_PIPE_SEL_IVB(crtc->pipe));
7638
				PF_PIPE_SEL_IVB(crtc->pipe));
7209
		}
7639
		}
7210
	}
7640
	}
7211
}
7641
}
7212
 
7642
 
7213
static void ironlake_get_plane_config(struct intel_crtc *crtc,
7643
static void ironlake_get_plane_config(struct intel_crtc *crtc,
7214
				      struct intel_plane_config *plane_config)
7644
				      struct intel_plane_config *plane_config)
7215
{
7645
{
7216
	struct drm_device *dev = crtc->base.dev;
7646
	struct drm_device *dev = crtc->base.dev;
7217
	struct drm_i915_private *dev_priv = dev->dev_private;
7647
	struct drm_i915_private *dev_priv = dev->dev_private;
7218
	u32 val, base, offset;
7648
	u32 val, base, offset;
7219
	int pipe = crtc->pipe, plane = crtc->plane;
7649
	int pipe = crtc->pipe, plane = crtc->plane;
7220
	int fourcc, pixel_format;
7650
	int fourcc, pixel_format;
7221
	int aligned_height;
7651
	int aligned_height;
7222
 
7652
 
7223
	crtc->base.primary->fb = kzalloc(sizeof(struct intel_framebuffer), GFP_KERNEL);
7653
	crtc->base.primary->fb = kzalloc(sizeof(struct intel_framebuffer), GFP_KERNEL);
7224
	if (!crtc->base.primary->fb) {
7654
	if (!crtc->base.primary->fb) {
7225
		DRM_DEBUG_KMS("failed to alloc fb\n");
7655
		DRM_DEBUG_KMS("failed to alloc fb\n");
7226
		return;
7656
		return;
7227
	}
7657
	}
7228
 
7658
 
7229
	val = I915_READ(DSPCNTR(plane));
7659
	val = I915_READ(DSPCNTR(plane));
7230
 
7660
 
7231
	if (INTEL_INFO(dev)->gen >= 4)
7661
	if (INTEL_INFO(dev)->gen >= 4)
7232
		if (val & DISPPLANE_TILED)
7662
		if (val & DISPPLANE_TILED)
7233
			plane_config->tiled = true;
7663
			plane_config->tiled = true;
7234
 
7664
 
7235
	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
7665
	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
7236
	fourcc = intel_format_to_fourcc(pixel_format);
7666
	fourcc = intel_format_to_fourcc(pixel_format);
7237
	crtc->base.primary->fb->pixel_format = fourcc;
7667
	crtc->base.primary->fb->pixel_format = fourcc;
7238
	crtc->base.primary->fb->bits_per_pixel =
7668
	crtc->base.primary->fb->bits_per_pixel =
7239
		drm_format_plane_cpp(fourcc, 0) * 8;
7669
		drm_format_plane_cpp(fourcc, 0) * 8;
7240
 
7670
 
7241
	base = I915_READ(DSPSURF(plane)) & 0xfffff000;
7671
	base = I915_READ(DSPSURF(plane)) & 0xfffff000;
7242
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
7672
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
7243
		offset = I915_READ(DSPOFFSET(plane));
7673
		offset = I915_READ(DSPOFFSET(plane));
7244
	} else {
7674
	} else {
7245
		if (plane_config->tiled)
7675
		if (plane_config->tiled)
7246
			offset = I915_READ(DSPTILEOFF(plane));
7676
			offset = I915_READ(DSPTILEOFF(plane));
7247
		else
7677
		else
7248
			offset = I915_READ(DSPLINOFF(plane));
7678
			offset = I915_READ(DSPLINOFF(plane));
7249
	}
7679
	}
7250
	plane_config->base = base;
7680
	plane_config->base = base;
7251
 
7681
 
7252
	val = I915_READ(PIPESRC(pipe));
7682
	val = I915_READ(PIPESRC(pipe));
7253
	crtc->base.primary->fb->width = ((val >> 16) & 0xfff) + 1;
7683
	crtc->base.primary->fb->width = ((val >> 16) & 0xfff) + 1;
7254
	crtc->base.primary->fb->height = ((val >> 0) & 0xfff) + 1;
7684
	crtc->base.primary->fb->height = ((val >> 0) & 0xfff) + 1;
7255
 
7685
 
7256
	val = I915_READ(DSPSTRIDE(pipe));
7686
	val = I915_READ(DSPSTRIDE(pipe));
7257
	crtc->base.primary->fb->pitches[0] = val & 0xffffff80;
7687
	crtc->base.primary->fb->pitches[0] = val & 0xffffffc0;
7258
 
7688
 
7259
	aligned_height = intel_align_height(dev, crtc->base.primary->fb->height,
7689
	aligned_height = intel_align_height(dev, crtc->base.primary->fb->height,
7260
					    plane_config->tiled);
7690
					    plane_config->tiled);
7261
 
7691
 
-
 
7692
	plane_config->size = PAGE_ALIGN(crtc->base.primary->fb->pitches[0] *
7262
	plane_config->size = 16*1024*1024;
7693
					aligned_height);
7263
 
7694
 
7264
	DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
7695
	DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
7265
		      pipe, plane, crtc->base.primary->fb->width,
7696
		      pipe, plane, crtc->base.primary->fb->width,
7266
		      crtc->base.primary->fb->height,
7697
		      crtc->base.primary->fb->height,
7267
		      crtc->base.primary->fb->bits_per_pixel, base,
7698
		      crtc->base.primary->fb->bits_per_pixel, base,
7268
		      crtc->base.primary->fb->pitches[0],
7699
		      crtc->base.primary->fb->pitches[0],
7269
		      plane_config->size);
7700
		      plane_config->size);
7270
}
7701
}
7271
 
7702
 
7272
static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
7703
static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
7273
				     struct intel_crtc_config *pipe_config)
7704
				     struct intel_crtc_config *pipe_config)
7274
{
7705
{
7275
	struct drm_device *dev = crtc->base.dev;
7706
	struct drm_device *dev = crtc->base.dev;
7276
	struct drm_i915_private *dev_priv = dev->dev_private;
7707
	struct drm_i915_private *dev_priv = dev->dev_private;
7277
	uint32_t tmp;
7708
	uint32_t tmp;
7278
 
7709
 
7279
	if (!intel_display_power_enabled(dev_priv,
7710
	if (!intel_display_power_is_enabled(dev_priv,
7280
					 POWER_DOMAIN_PIPE(crtc->pipe)))
7711
					 POWER_DOMAIN_PIPE(crtc->pipe)))
7281
		return false;
7712
		return false;
7282
 
7713
 
7283
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
7714
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
7284
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;
7715
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;
7285
 
7716
 
7286
	tmp = I915_READ(PIPECONF(crtc->pipe));
7717
	tmp = I915_READ(PIPECONF(crtc->pipe));
7287
	if (!(tmp & PIPECONF_ENABLE))
7718
	if (!(tmp & PIPECONF_ENABLE))
7288
		return false;
7719
		return false;
7289
 
7720
 
7290
	switch (tmp & PIPECONF_BPC_MASK) {
7721
	switch (tmp & PIPECONF_BPC_MASK) {
7291
	case PIPECONF_6BPC:
7722
	case PIPECONF_6BPC:
7292
		pipe_config->pipe_bpp = 18;
7723
		pipe_config->pipe_bpp = 18;
7293
		break;
7724
		break;
7294
	case PIPECONF_8BPC:
7725
	case PIPECONF_8BPC:
7295
		pipe_config->pipe_bpp = 24;
7726
		pipe_config->pipe_bpp = 24;
7296
		break;
7727
		break;
7297
	case PIPECONF_10BPC:
7728
	case PIPECONF_10BPC:
7298
		pipe_config->pipe_bpp = 30;
7729
		pipe_config->pipe_bpp = 30;
7299
		break;
7730
		break;
7300
	case PIPECONF_12BPC:
7731
	case PIPECONF_12BPC:
7301
		pipe_config->pipe_bpp = 36;
7732
		pipe_config->pipe_bpp = 36;
7302
		break;
7733
		break;
7303
	default:
7734
	default:
7304
		break;
7735
		break;
7305
	}
7736
	}
7306
 
7737
 
7307
	if (tmp & PIPECONF_COLOR_RANGE_SELECT)
7738
	if (tmp & PIPECONF_COLOR_RANGE_SELECT)
7308
		pipe_config->limited_color_range = true;
7739
		pipe_config->limited_color_range = true;
7309
 
7740
 
7310
	if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
7741
	if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
7311
		struct intel_shared_dpll *pll;
7742
		struct intel_shared_dpll *pll;
7312
 
7743
 
7313
		pipe_config->has_pch_encoder = true;
7744
		pipe_config->has_pch_encoder = true;
7314
 
7745
 
7315
		tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
7746
		tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
7316
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
7747
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
7317
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;
7748
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;
7318
 
7749
 
7319
		ironlake_get_fdi_m_n_config(crtc, pipe_config);
7750
		ironlake_get_fdi_m_n_config(crtc, pipe_config);
7320
 
7751
 
7321
		if (HAS_PCH_IBX(dev_priv->dev)) {
7752
		if (HAS_PCH_IBX(dev_priv->dev)) {
7322
			pipe_config->shared_dpll =
7753
			pipe_config->shared_dpll =
7323
				(enum intel_dpll_id) crtc->pipe;
7754
				(enum intel_dpll_id) crtc->pipe;
7324
		} else {
7755
		} else {
7325
			tmp = I915_READ(PCH_DPLL_SEL);
7756
			tmp = I915_READ(PCH_DPLL_SEL);
7326
			if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
7757
			if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
7327
				pipe_config->shared_dpll = DPLL_ID_PCH_PLL_B;
7758
				pipe_config->shared_dpll = DPLL_ID_PCH_PLL_B;
7328
			else
7759
			else
7329
				pipe_config->shared_dpll = DPLL_ID_PCH_PLL_A;
7760
				pipe_config->shared_dpll = DPLL_ID_PCH_PLL_A;
7330
		}
7761
		}
7331
 
7762
 
7332
		pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];
7763
		pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];
7333
 
7764
 
7334
		WARN_ON(!pll->get_hw_state(dev_priv, pll,
7765
		WARN_ON(!pll->get_hw_state(dev_priv, pll,
7335
					   &pipe_config->dpll_hw_state));
7766
					   &pipe_config->dpll_hw_state));
7336
 
7767
 
7337
		tmp = pipe_config->dpll_hw_state.dpll;
7768
		tmp = pipe_config->dpll_hw_state.dpll;
7338
		pipe_config->pixel_multiplier =
7769
		pipe_config->pixel_multiplier =
7339
			((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
7770
			((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
7340
			 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
7771
			 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
7341
 
7772
 
7342
		ironlake_pch_clock_get(crtc, pipe_config);
7773
		ironlake_pch_clock_get(crtc, pipe_config);
7343
	} else {
7774
	} else {
7344
		pipe_config->pixel_multiplier = 1;
7775
		pipe_config->pixel_multiplier = 1;
7345
	}
7776
	}
7346
 
7777
 
7347
	intel_get_pipe_timings(crtc, pipe_config);
7778
	intel_get_pipe_timings(crtc, pipe_config);
7348
 
7779
 
7349
	ironlake_get_pfit_config(crtc, pipe_config);
7780
	ironlake_get_pfit_config(crtc, pipe_config);
7350
 
7781
 
7351
	return true;
7782
	return true;
7352
}
7783
}
7353
 
7784
 
7354
static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
7785
static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
7355
{
7786
{
7356
	struct drm_device *dev = dev_priv->dev;
7787
	struct drm_device *dev = dev_priv->dev;
7357
	struct intel_crtc *crtc;
7788
	struct intel_crtc *crtc;
7358
 
7789
 
7359
	for_each_intel_crtc(dev, crtc)
7790
	for_each_intel_crtc(dev, crtc)
7360
		WARN(crtc->active, "CRTC for pipe %c enabled\n",
7791
		WARN(crtc->active, "CRTC for pipe %c enabled\n",
7361
		     pipe_name(crtc->pipe));
7792
		     pipe_name(crtc->pipe));
7362
 
7793
 
7363
	WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
7794
	WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
7364
	WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
7795
	WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
7365
	WARN(I915_READ(WRPLL_CTL1) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
7796
	WARN(I915_READ(WRPLL_CTL1) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
7366
	WARN(I915_READ(WRPLL_CTL2) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
7797
	WARN(I915_READ(WRPLL_CTL2) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
7367
	WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
7798
	WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
7368
	WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
7799
	WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
7369
	     "CPU PWM1 enabled\n");
7800
	     "CPU PWM1 enabled\n");
7370
	if (IS_HASWELL(dev))
7801
	if (IS_HASWELL(dev))
7371
	WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
7802
	WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
7372
	     "CPU PWM2 enabled\n");
7803
	     "CPU PWM2 enabled\n");
7373
	WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
7804
	WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
7374
	     "PCH PWM1 enabled\n");
7805
	     "PCH PWM1 enabled\n");
7375
	WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
7806
	WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
7376
	     "Utility pin enabled\n");
7807
	     "Utility pin enabled\n");
7377
	WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");
7808
	WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");
7378
 
7809
 
7379
	/*
7810
	/*
7380
	 * In theory we can still leave IRQs enabled, as long as only the HPD
7811
	 * In theory we can still leave IRQs enabled, as long as only the HPD
7381
	 * interrupts remain enabled. We used to check for that, but since it's
7812
	 * interrupts remain enabled. We used to check for that, but since it's
7382
	 * gen-specific and since we only disable LCPLL after we fully disable
7813
	 * gen-specific and since we only disable LCPLL after we fully disable
7383
	 * the interrupts, the check below should be enough.
7814
	 * the interrupts, the check below should be enough.
7384
	 */
7815
	 */
7385
	WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
7816
	WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
7386
}
7817
}
7387
 
7818
 
7388
static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
7819
static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
7389
{
7820
{
7390
	struct drm_device *dev = dev_priv->dev;
7821
	struct drm_device *dev = dev_priv->dev;
7391
 
7822
 
7392
	if (IS_HASWELL(dev))
7823
	if (IS_HASWELL(dev))
7393
		return I915_READ(D_COMP_HSW);
7824
		return I915_READ(D_COMP_HSW);
7394
	else
7825
	else
7395
		return I915_READ(D_COMP_BDW);
7826
		return I915_READ(D_COMP_BDW);
7396
}
7827
}
7397
 
7828
 
7398
static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
7829
static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
7399
{
7830
{
7400
	struct drm_device *dev = dev_priv->dev;
7831
	struct drm_device *dev = dev_priv->dev;
7401
 
7832
 
7402
	if (IS_HASWELL(dev)) {
7833
	if (IS_HASWELL(dev)) {
7403
		mutex_lock(&dev_priv->rps.hw_lock);
7834
		mutex_lock(&dev_priv->rps.hw_lock);
7404
		if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
7835
		if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
7405
					    val))
7836
					    val))
7406
			DRM_ERROR("Failed to write to D_COMP\n");
7837
			DRM_ERROR("Failed to write to D_COMP\n");
7407
		mutex_unlock(&dev_priv->rps.hw_lock);
7838
		mutex_unlock(&dev_priv->rps.hw_lock);
7408
	} else {
7839
	} else {
7409
		I915_WRITE(D_COMP_BDW, val);
7840
		I915_WRITE(D_COMP_BDW, val);
7410
		POSTING_READ(D_COMP_BDW);
7841
		POSTING_READ(D_COMP_BDW);
7411
	}
7842
	}
7412
}
7843
}
7413
 
7844
 
7414
/*
7845
/*
7415
 * This function implements pieces of two sequences from BSpec:
7846
 * This function implements pieces of two sequences from BSpec:
7416
 * - Sequence for display software to disable LCPLL
7847
 * - Sequence for display software to disable LCPLL
7417
 * - Sequence for display software to allow package C8+
7848
 * - Sequence for display software to allow package C8+
7418
 * The steps implemented here are just the steps that actually touch the LCPLL
7849
 * The steps implemented here are just the steps that actually touch the LCPLL
7419
 * register. Callers should take care of disabling all the display engine
7850
 * register. Callers should take care of disabling all the display engine
7420
 * functions, doing the mode unset, fixing interrupts, etc.
7851
 * functions, doing the mode unset, fixing interrupts, etc.
7421
 */
7852
 */
7422
static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
7853
static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
7423
		       bool switch_to_fclk, bool allow_power_down)
7854
		       bool switch_to_fclk, bool allow_power_down)
7424
{
7855
{
7425
	uint32_t val;
7856
	uint32_t val;
7426
 
7857
 
7427
	assert_can_disable_lcpll(dev_priv);
7858
	assert_can_disable_lcpll(dev_priv);
7428
 
7859
 
7429
	val = I915_READ(LCPLL_CTL);
7860
	val = I915_READ(LCPLL_CTL);
7430
 
7861
 
7431
	if (switch_to_fclk) {
7862
	if (switch_to_fclk) {
7432
		val |= LCPLL_CD_SOURCE_FCLK;
7863
		val |= LCPLL_CD_SOURCE_FCLK;
7433
		I915_WRITE(LCPLL_CTL, val);
7864
		I915_WRITE(LCPLL_CTL, val);
7434
 
7865
 
7435
		if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
7866
		if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
7436
				       LCPLL_CD_SOURCE_FCLK_DONE, 1))
7867
				       LCPLL_CD_SOURCE_FCLK_DONE, 1))
7437
			DRM_ERROR("Switching to FCLK failed\n");
7868
			DRM_ERROR("Switching to FCLK failed\n");
7438
 
7869
 
7439
		val = I915_READ(LCPLL_CTL);
7870
		val = I915_READ(LCPLL_CTL);
7440
	}
7871
	}
7441
 
7872
 
7442
	val |= LCPLL_PLL_DISABLE;
7873
	val |= LCPLL_PLL_DISABLE;
7443
	I915_WRITE(LCPLL_CTL, val);
7874
	I915_WRITE(LCPLL_CTL, val);
7444
	POSTING_READ(LCPLL_CTL);
7875
	POSTING_READ(LCPLL_CTL);
7445
 
7876
 
7446
	if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1))
7877
	if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1))
7447
		DRM_ERROR("LCPLL still locked\n");
7878
		DRM_ERROR("LCPLL still locked\n");
7448
 
7879
 
7449
	val = hsw_read_dcomp(dev_priv);
7880
	val = hsw_read_dcomp(dev_priv);
7450
	val |= D_COMP_COMP_DISABLE;
7881
	val |= D_COMP_COMP_DISABLE;
7451
	hsw_write_dcomp(dev_priv, val);
7882
	hsw_write_dcomp(dev_priv, val);
7452
	ndelay(100);
7883
	ndelay(100);
7453
 
7884
 
7454
	if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0,
7885
	if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0,
7455
		     1))
7886
		     1))
7456
		DRM_ERROR("D_COMP RCOMP still in progress\n");
7887
		DRM_ERROR("D_COMP RCOMP still in progress\n");
7457
 
7888
 
7458
	if (allow_power_down) {
7889
	if (allow_power_down) {
7459
		val = I915_READ(LCPLL_CTL);
7890
		val = I915_READ(LCPLL_CTL);
7460
		val |= LCPLL_POWER_DOWN_ALLOW;
7891
		val |= LCPLL_POWER_DOWN_ALLOW;
7461
		I915_WRITE(LCPLL_CTL, val);
7892
		I915_WRITE(LCPLL_CTL, val);
7462
		POSTING_READ(LCPLL_CTL);
7893
		POSTING_READ(LCPLL_CTL);
7463
	}
7894
	}
7464
}
7895
}
7465
 
7896
 
7466
/*
7897
/*
7467
 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
7898
 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
7468
 * source.
7899
 * source.
7469
 */
7900
 */
7470
static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
7901
static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
7471
{
7902
{
7472
	uint32_t val;
7903
	uint32_t val;
7473
	unsigned long irqflags;
-
 
7474
 
7904
 
7475
	val = I915_READ(LCPLL_CTL);
7905
	val = I915_READ(LCPLL_CTL);
7476
 
7906
 
7477
	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
7907
	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
7478
		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
7908
		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
7479
		return;
7909
		return;
7480
 
7910
 
7481
	/*
7911
	/*
7482
	 * Make sure we're not on PC8 state before disabling PC8, otherwise
7912
	 * Make sure we're not on PC8 state before disabling PC8, otherwise
7483
	 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
7913
	 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
7484
	 *
7914
	 *
7485
	 * The other problem is that hsw_restore_lcpll() is called as part of
7915
	 * The other problem is that hsw_restore_lcpll() is called as part of
7486
	 * the runtime PM resume sequence, so we can't just call
7916
	 * the runtime PM resume sequence, so we can't just call
7487
	 * gen6_gt_force_wake_get() because that function calls
7917
	 * gen6_gt_force_wake_get() because that function calls
7488
	 * intel_runtime_pm_get(), and we can't change the runtime PM refcount
7918
	 * intel_runtime_pm_get(), and we can't change the runtime PM refcount
7489
	 * while we are on the resume sequence. So to solve this problem we have
7919
	 * while we are on the resume sequence. So to solve this problem we have
7490
	 * to call special forcewake code that doesn't touch runtime PM and
7920
	 * to call special forcewake code that doesn't touch runtime PM and
7491
	 * doesn't enable the forcewake delayed work.
7921
	 * doesn't enable the forcewake delayed work.
7492
	 */
7922
	 */
7493
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
7923
	spin_lock_irq(&dev_priv->uncore.lock);
7494
	if (dev_priv->uncore.forcewake_count++ == 0)
7924
	if (dev_priv->uncore.forcewake_count++ == 0)
7495
		dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
7925
		dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
7496
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
7926
	spin_unlock_irq(&dev_priv->uncore.lock);
7497
 
7927
 
7498
	if (val & LCPLL_POWER_DOWN_ALLOW) {
7928
	if (val & LCPLL_POWER_DOWN_ALLOW) {
7499
		val &= ~LCPLL_POWER_DOWN_ALLOW;
7929
		val &= ~LCPLL_POWER_DOWN_ALLOW;
7500
		I915_WRITE(LCPLL_CTL, val);
7930
		I915_WRITE(LCPLL_CTL, val);
7501
		POSTING_READ(LCPLL_CTL);
7931
		POSTING_READ(LCPLL_CTL);
7502
	}
7932
	}
7503
 
7933
 
7504
	val = hsw_read_dcomp(dev_priv);
7934
	val = hsw_read_dcomp(dev_priv);
7505
	val |= D_COMP_COMP_FORCE;
7935
	val |= D_COMP_COMP_FORCE;
7506
	val &= ~D_COMP_COMP_DISABLE;
7936
	val &= ~D_COMP_COMP_DISABLE;
7507
	hsw_write_dcomp(dev_priv, val);
7937
	hsw_write_dcomp(dev_priv, val);
7508
 
7938
 
7509
	val = I915_READ(LCPLL_CTL);
7939
	val = I915_READ(LCPLL_CTL);
7510
	val &= ~LCPLL_PLL_DISABLE;
7940
	val &= ~LCPLL_PLL_DISABLE;
7511
	I915_WRITE(LCPLL_CTL, val);
7941
	I915_WRITE(LCPLL_CTL, val);
7512
 
7942
 
7513
	if (wait_for(I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK, 5))
7943
	if (wait_for(I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK, 5))
7514
		DRM_ERROR("LCPLL not locked yet\n");
7944
		DRM_ERROR("LCPLL not locked yet\n");
7515
 
7945
 
7516
	if (val & LCPLL_CD_SOURCE_FCLK) {
7946
	if (val & LCPLL_CD_SOURCE_FCLK) {
7517
		val = I915_READ(LCPLL_CTL);
7947
		val = I915_READ(LCPLL_CTL);
7518
		val &= ~LCPLL_CD_SOURCE_FCLK;
7948
		val &= ~LCPLL_CD_SOURCE_FCLK;
7519
		I915_WRITE(LCPLL_CTL, val);
7949
		I915_WRITE(LCPLL_CTL, val);
7520
 
7950
 
7521
		if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
7951
		if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
7522
					LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
7952
					LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
7523
			DRM_ERROR("Switching back to LCPLL failed\n");
7953
			DRM_ERROR("Switching back to LCPLL failed\n");
7524
	}
7954
	}
7525
 
7955
 
7526
	/* See the big comment above. */
7956
	/* See the big comment above. */
7527
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
7957
	spin_lock_irq(&dev_priv->uncore.lock);
7528
	if (--dev_priv->uncore.forcewake_count == 0)
7958
	if (--dev_priv->uncore.forcewake_count == 0)
7529
		dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
7959
		dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
7530
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
7960
	spin_unlock_irq(&dev_priv->uncore.lock);
7531
}
7961
}
7532
 
7962
 
7533
/*
7963
/*
7534
 * Package states C8 and deeper are really deep PC states that can only be
7964
 * Package states C8 and deeper are really deep PC states that can only be
7535
 * reached when all the devices on the system allow it, so even if the graphics
7965
 * reached when all the devices on the system allow it, so even if the graphics
7536
 * device allows PC8+, it doesn't mean the system will actually get to these
7966
 * device allows PC8+, it doesn't mean the system will actually get to these
7537
 * states. Our driver only allows PC8+ when going into runtime PM.
7967
 * states. Our driver only allows PC8+ when going into runtime PM.
7538
 *
7968
 *
7539
 * The requirements for PC8+ are that all the outputs are disabled, the power
7969
 * The requirements for PC8+ are that all the outputs are disabled, the power
7540
 * well is disabled and most interrupts are disabled, and these are also
7970
 * well is disabled and most interrupts are disabled, and these are also
7541
 * requirements for runtime PM. When these conditions are met, we manually do
7971
 * requirements for runtime PM. When these conditions are met, we manually do
7542
 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
7972
 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
7543
 * to Fclk. If we're in PC8+ and we get a non-hotplug interrupt, we can hard
7973
 * to Fclk. If we're in PC8+ and we get a non-hotplug interrupt, we can hard
7544
 * hang the machine.
7974
 * hang the machine.
7545
 *
7975
 *
7546
 * When we really reach PC8 or deeper states (not just when we allow it) we lose
7976
 * When we really reach PC8 or deeper states (not just when we allow it) we lose
7547
 * the state of some registers, so when we come back from PC8+ we need to
7977
 * the state of some registers, so when we come back from PC8+ we need to
7548
 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
7978
 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
7549
 * need to take care of the registers kept by RC6. Notice that this happens even
7979
 * need to take care of the registers kept by RC6. Notice that this happens even
7550
 * if we don't put the device in PCI D3 state (which is what currently happens
7980
 * if we don't put the device in PCI D3 state (which is what currently happens
7551
 * because of the runtime PM support).
7981
 * because of the runtime PM support).
7552
 *
7982
 *
7553
 * For more, read "Display Sequences for Package C8" on the hardware
7983
 * For more, read "Display Sequences for Package C8" on the hardware
7554
 * documentation.
7984
 * documentation.
7555
 */
7985
 */
7556
void hsw_enable_pc8(struct drm_i915_private *dev_priv)
7986
void hsw_enable_pc8(struct drm_i915_private *dev_priv)
7557
{
7987
{
7558
	struct drm_device *dev = dev_priv->dev;
7988
	struct drm_device *dev = dev_priv->dev;
7559
	uint32_t val;
7989
	uint32_t val;
7560
 
7990
 
7561
	DRM_DEBUG_KMS("Enabling package C8+\n");
7991
	DRM_DEBUG_KMS("Enabling package C8+\n");
7562
 
7992
 
7563
	if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
7993
	if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
7564
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
7994
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
7565
		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
7995
		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
7566
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
7996
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
7567
	}
7997
	}
7568
 
7998
 
7569
	lpt_disable_clkout_dp(dev);
7999
	lpt_disable_clkout_dp(dev);
7570
	hsw_disable_lcpll(dev_priv, true, true);
8000
	hsw_disable_lcpll(dev_priv, true, true);
7571
}
8001
}
7572
 
8002
 
7573
void hsw_disable_pc8(struct drm_i915_private *dev_priv)
8003
void hsw_disable_pc8(struct drm_i915_private *dev_priv)
7574
{
8004
{
7575
	struct drm_device *dev = dev_priv->dev;
8005
	struct drm_device *dev = dev_priv->dev;
7576
	uint32_t val;
8006
	uint32_t val;
7577
 
8007
 
7578
	DRM_DEBUG_KMS("Disabling package C8+\n");
8008
	DRM_DEBUG_KMS("Disabling package C8+\n");
7579
 
8009
 
7580
	hsw_restore_lcpll(dev_priv);
8010
	hsw_restore_lcpll(dev_priv);
7581
	lpt_init_pch_refclk(dev);
8011
	lpt_init_pch_refclk(dev);
7582
 
8012
 
7583
	if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
8013
	if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
7584
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
8014
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
7585
		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
8015
		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
7586
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
8016
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
7587
	}
8017
	}
7588
 
8018
 
7589
	intel_prepare_ddi(dev);
8019
	intel_prepare_ddi(dev);
7590
}
8020
}
7591
 
8021
 
Rev 5097:

static void snb_modeset_global_resources(struct drm_device *dev)
{
	modeset_update_crtc_power_domains(dev);
}

static void haswell_modeset_global_resources(struct drm_device *dev)
{
	modeset_update_crtc_power_domains(dev);
}

static int haswell_crtc_mode_set(struct drm_crtc *crtc,
				 int x, int y,
				 struct drm_framebuffer *fb)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	if (!intel_ddi_pll_select(intel_crtc))
		return -EINVAL;

	intel_crtc->lowfreq_avail = false;

	return 0;
}

Rev 5354:

static int haswell_crtc_compute_clock(struct intel_crtc *crtc)
{
	if (!intel_ddi_pll_select(crtc))
		return -EINVAL;

	crtc->lowfreq_avail = false;

	return 0;
}

static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
				enum port port,
				struct intel_crtc_config *pipe_config)
{
	u32 temp;

	temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
	pipe_config->ddi_pll_sel = temp >> (port * 3 + 1);

	switch (pipe_config->ddi_pll_sel) {
	case SKL_DPLL1:
		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL1;
		break;
	case SKL_DPLL2:
		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL2;
		break;
	case SKL_DPLL3:
		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL3;
		break;
	}
}

static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
				enum port port,
				struct intel_crtc_config *pipe_config)
{
	pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));

	switch (pipe_config->ddi_pll_sel) {
	case PORT_CLK_SEL_WRPLL1:
		pipe_config->shared_dpll = DPLL_ID_WRPLL1;
		break;
	case PORT_CLK_SEL_WRPLL2:
		pipe_config->shared_dpll = DPLL_ID_WRPLL2;
		break;
	}
}

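Hunk summary: in rev 5354 the snb/haswell modeset_global_resources stubs and haswell_crtc_mode_set() give way to haswell_crtc_compute_clock(), and the DPLL readback moves into the per-platform helpers skylake_get_ddi_pll() (decoding DPLL_CTRL2) and haswell_get_ddi_pll() (decoding PORT_CLK_SEL), which haswell_get_ddi_port_state() below now calls.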
7616
static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
8070
static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
7617
				       struct intel_crtc_config *pipe_config)
8071
				       struct intel_crtc_config *pipe_config)
7618
{
8072
{
7619
	struct drm_device *dev = crtc->base.dev;
8073
	struct drm_device *dev = crtc->base.dev;
7620
	struct drm_i915_private *dev_priv = dev->dev_private;
8074
	struct drm_i915_private *dev_priv = dev->dev_private;
7621
	struct intel_shared_dpll *pll;
8075
	struct intel_shared_dpll *pll;
7622
	enum port port;
8076
	enum port port;
7623
	uint32_t tmp;
8077
	uint32_t tmp;
7624
 
8078
 
7625
	tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
8079
	tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
7626
 
8080
 
7627
	port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
8081
	port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
Rev 5097:

	pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));

	switch (pipe_config->ddi_pll_sel) {
	case PORT_CLK_SEL_WRPLL1:
		pipe_config->shared_dpll = DPLL_ID_WRPLL1;
		break;
	case PORT_CLK_SEL_WRPLL2:
		pipe_config->shared_dpll = DPLL_ID_WRPLL2;
		break;
	}

Rev 5354:

	if (IS_SKYLAKE(dev))
		skylake_get_ddi_pll(dev_priv, port, pipe_config);
	else
		haswell_get_ddi_pll(dev_priv, port, pipe_config);
7639
 
8087
 
7640
	if (pipe_config->shared_dpll >= 0) {
8088
	if (pipe_config->shared_dpll >= 0) {
7641
		pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];
8089
		pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];
7642
 
8090
 
7643
		WARN_ON(!pll->get_hw_state(dev_priv, pll,
8091
		WARN_ON(!pll->get_hw_state(dev_priv, pll,
7644
					   &pipe_config->dpll_hw_state));
8092
					   &pipe_config->dpll_hw_state));
7645
	}
8093
	}
7646
 
8094
 
7647
	/*
8095
	/*
7648
	 * Haswell has only FDI/PCH transcoder A, which is connected to
8096
	 * Haswell has only FDI/PCH transcoder A, which is connected to
7649
	 * DDI E. So just check whether this pipe is wired to DDI E and whether
8097
	 * DDI E. So just check whether this pipe is wired to DDI E and whether
7650
	 * the PCH transcoder is on.
8098
	 * the PCH transcoder is on.
7651
	 */
8099
	 */
Rev 5097:
	if ((port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {

Rev 5354:
	if (INTEL_INFO(dev)->gen < 9 &&
	    (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
7653
		pipe_config->has_pch_encoder = true;
8102
		pipe_config->has_pch_encoder = true;
7654
 
8103
 
7655
		tmp = I915_READ(FDI_RX_CTL(PIPE_A));
8104
		tmp = I915_READ(FDI_RX_CTL(PIPE_A));
7656
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
8105
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
7657
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;
8106
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;
7658
 
8107
 
7659
		ironlake_get_fdi_m_n_config(crtc, pipe_config);
8108
		ironlake_get_fdi_m_n_config(crtc, pipe_config);
7660
	}
8109
	}
7661
}
8110
}
7662
 
8111
 
7663
static bool haswell_get_pipe_config(struct intel_crtc *crtc,
8112
static bool haswell_get_pipe_config(struct intel_crtc *crtc,
7664
				    struct intel_crtc_config *pipe_config)
8113
				    struct intel_crtc_config *pipe_config)
7665
{
8114
{
7666
	struct drm_device *dev = crtc->base.dev;
8115
	struct drm_device *dev = crtc->base.dev;
7667
	struct drm_i915_private *dev_priv = dev->dev_private;
8116
	struct drm_i915_private *dev_priv = dev->dev_private;
7668
	enum intel_display_power_domain pfit_domain;
8117
	enum intel_display_power_domain pfit_domain;
7669
	uint32_t tmp;
8118
	uint32_t tmp;
7670
 
8119
 
7671
	if (!intel_display_power_enabled(dev_priv,
8120
	if (!intel_display_power_is_enabled(dev_priv,
7672
					 POWER_DOMAIN_PIPE(crtc->pipe)))
8121
					 POWER_DOMAIN_PIPE(crtc->pipe)))
7673
		return false;
8122
		return false;
7674
 
8123
 
7675
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
8124
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
7676
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;
8125
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;
7677
 
8126
 
7678
	tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
8127
	tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
7679
	if (tmp & TRANS_DDI_FUNC_ENABLE) {
8128
	if (tmp & TRANS_DDI_FUNC_ENABLE) {
7680
		enum pipe trans_edp_pipe;
8129
		enum pipe trans_edp_pipe;
7681
		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
8130
		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
7682
		default:
8131
		default:
7683
			WARN(1, "unknown pipe linked to edp transcoder\n");
8132
			WARN(1, "unknown pipe linked to edp transcoder\n");
7684
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
8133
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
7685
		case TRANS_DDI_EDP_INPUT_A_ON:
8134
		case TRANS_DDI_EDP_INPUT_A_ON:
7686
			trans_edp_pipe = PIPE_A;
8135
			trans_edp_pipe = PIPE_A;
7687
			break;
8136
			break;
7688
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
8137
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
7689
			trans_edp_pipe = PIPE_B;
8138
			trans_edp_pipe = PIPE_B;
7690
			break;
8139
			break;
7691
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
8140
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
7692
			trans_edp_pipe = PIPE_C;
8141
			trans_edp_pipe = PIPE_C;
7693
			break;
8142
			break;
7694
		}
8143
		}
7695
 
8144
 
7696
		if (trans_edp_pipe == crtc->pipe)
8145
		if (trans_edp_pipe == crtc->pipe)
7697
			pipe_config->cpu_transcoder = TRANSCODER_EDP;
8146
			pipe_config->cpu_transcoder = TRANSCODER_EDP;
7698
	}
8147
	}
7699
 
8148
 
7700
	if (!intel_display_power_enabled(dev_priv,
8149
	if (!intel_display_power_is_enabled(dev_priv,
7701
			POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
8150
			POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
7702
		return false;
8151
		return false;
7703
 
8152
 
7704
	tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
8153
	tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
7705
	if (!(tmp & PIPECONF_ENABLE))
8154
	if (!(tmp & PIPECONF_ENABLE))
7706
		return false;
8155
		return false;
7707
 
8156
 
7708
	haswell_get_ddi_port_state(crtc, pipe_config);
8157
	haswell_get_ddi_port_state(crtc, pipe_config);
7709
 
8158
 
7710
	intel_get_pipe_timings(crtc, pipe_config);
8159
	intel_get_pipe_timings(crtc, pipe_config);
7711
 
8160
 
7712
	pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
8161
	pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
Rev 5354 (remainder of haswell_get_pipe_config):

	if (intel_display_power_is_enabled(dev_priv, pfit_domain)) {
		if (IS_SKYLAKE(dev))
			skylake_get_pfit_config(crtc, pipe_config);
		else
		ironlake_get_pfit_config(crtc, pipe_config);
	}

	if (IS_HASWELL(dev))
	pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
				   (I915_READ(IPS_CTL) & IPS_ENABLE);

	if (pipe_config->cpu_transcoder != TRANSCODER_EDP) {
		pipe_config->pixel_multiplier =
			I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
	} else {
	pipe_config->pixel_multiplier = 1;
	}

	return true;
}

Rev 5097 (remainder of haswell_get_pipe_config, plus the HDMI audio helpers kept only in this revision):

	if (intel_display_power_enabled(dev_priv, pfit_domain))
		ironlake_get_pfit_config(crtc, pipe_config);

	if (IS_HASWELL(dev))
	pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
				   (I915_READ(IPS_CTL) & IPS_ENABLE);

	pipe_config->pixel_multiplier = 1;

	return true;
}

static struct {
	int clock;
	u32 config;
} hdmi_audio_clock[] = {
	{ DIV_ROUND_UP(25200 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 },
	{ 25200, AUD_CONFIG_PIXEL_CLOCK_HDMI_25200 }, /* default per bspec */
	{ 27000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27000 },
	{ 27000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27027 },
	{ 54000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54000 },
	{ 54000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54054 },
	{ DIV_ROUND_UP(74250 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_74176 },
	{ 74250, AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 },
	{ DIV_ROUND_UP(148500 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 },
	{ 148500, AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 },
};

/* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */
static u32 audio_config_hdmi_pixel_clock(struct drm_display_mode *mode)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(hdmi_audio_clock); i++) {
		if (mode->clock == hdmi_audio_clock[i].clock)
			break;
	}

	if (i == ARRAY_SIZE(hdmi_audio_clock)) {
		DRM_DEBUG_KMS("HDMI audio pixel clock setting for %d not found, falling back to defaults\n", mode->clock);
		i = 1;
	}

	DRM_DEBUG_KMS("Configuring HDMI audio for pixel clock %d (0x%08x)\n",
		      hdmi_audio_clock[i].clock,
		      hdmi_audio_clock[i].config);

	return hdmi_audio_clock[i].config;
}

static bool intel_eld_uptodate(struct drm_connector *connector,
			       int reg_eldv, uint32_t bits_eldv,
			       int reg_elda, uint32_t bits_elda,
			       int reg_edid)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	uint8_t *eld = connector->eld;
	uint32_t i;

	i = I915_READ(reg_eldv);
	i &= bits_eldv;

	if (!eld[0])
		return !i;

	if (!i)
		return false;

	i = I915_READ(reg_elda);
	i &= ~bits_elda;
	I915_WRITE(reg_elda, i);

	for (i = 0; i < eld[2]; i++)
		if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
			return false;

	return true;
}
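Hunk summary: rev 5354 adds Skylake panel-fitter readout and reads the pixel multiplier from PIPE_MULT for non-eDP transcoders, where rev 5097 always used ironlake_get_pfit_config() and a multiplier of 1. The g4x/haswell/ironlake ELD write helpers that follow exist only in rev 5097 (the rev 5354 column shows no counterpart for them).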
7791
 
8182
 
7792
static void g4x_write_eld(struct drm_connector *connector,
-
 
7793
			  struct drm_crtc *crtc,
-
 
7794
			  struct drm_display_mode *mode)
-
 
7795
{
-
 
7796
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
-
 
7797
	uint8_t *eld = connector->eld;
-
 
7798
	uint32_t eldv;
-
 
7799
	uint32_t len;
-
 
7800
	uint32_t i;
-
 
7801
 
-
 
7802
	i = I915_READ(G4X_AUD_VID_DID);
-
 
7803
 
-
 
7804
	if (i == INTEL_AUDIO_DEVBLC || i == INTEL_AUDIO_DEVCL)
-
 
7805
		eldv = G4X_ELDV_DEVCL_DEVBLC;
-
 
7806
	else
-
 
7807
		eldv = G4X_ELDV_DEVCTG;
-
 
7808
 
-
 
7809
	if (intel_eld_uptodate(connector,
-
 
7810
			       G4X_AUD_CNTL_ST, eldv,
-
 
7811
			       G4X_AUD_CNTL_ST, G4X_ELD_ADDR,
-
 
7812
			       G4X_HDMIW_HDMIEDID))
-
 
7813
		return;
-
 
7814
 
-
 
7815
	i = I915_READ(G4X_AUD_CNTL_ST);
-
 
7816
	i &= ~(eldv | G4X_ELD_ADDR);
-
 
7817
	len = (i >> 9) & 0x1f;		/* ELD buffer size */
-
 
7818
	I915_WRITE(G4X_AUD_CNTL_ST, i);
-
 
7819
 
-
 
7820
	if (!eld[0])
-
 
7821
		return;
-
 
7822
 
-
 
7823
	len = min_t(uint8_t, eld[2], len);
-
 
7824
	DRM_DEBUG_DRIVER("ELD size %d\n", len);
-
 
7825
	for (i = 0; i < len; i++)
-
 
7826
		I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i));
-
 
7827
 
-
 
7828
	i = I915_READ(G4X_AUD_CNTL_ST);
-
 
7829
	i |= eldv;
-
 
7830
	I915_WRITE(G4X_AUD_CNTL_ST, i);
-
 
7831
}
-
 
7832
 
-
 
7833
static void haswell_write_eld(struct drm_connector *connector,
-
 
7834
			      struct drm_crtc *crtc,
-
 
7835
			      struct drm_display_mode *mode)
-
 
7836
{
-
 
7837
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
-
 
7838
	uint8_t *eld = connector->eld;
-
 
7839
	uint32_t eldv;
-
 
7840
	uint32_t i;
-
 
7841
	int len;
-
 
7842
	int pipe = to_intel_crtc(crtc)->pipe;
-
 
7843
	int tmp;
-
 
7844
 
-
 
7845
	int hdmiw_hdmiedid = HSW_AUD_EDID_DATA(pipe);
-
 
7846
	int aud_cntl_st = HSW_AUD_DIP_ELD_CTRL(pipe);
-
 
7847
	int aud_config = HSW_AUD_CFG(pipe);
-
 
7848
	int aud_cntrl_st2 = HSW_AUD_PIN_ELD_CP_VLD;
-
 
7849
 
-
 
7850
	/* Audio output enable */
-
 
7851
	DRM_DEBUG_DRIVER("HDMI audio: enable codec\n");
-
 
7852
	tmp = I915_READ(aud_cntrl_st2);
-
 
7853
	tmp |= (AUDIO_OUTPUT_ENABLE_A << (pipe * 4));
-
 
7854
	I915_WRITE(aud_cntrl_st2, tmp);
-
 
7855
	POSTING_READ(aud_cntrl_st2);
-
 
7856
 
-
 
7857
	assert_pipe_disabled(dev_priv, to_intel_crtc(crtc)->pipe);
-
 
7858
 
-
 
7859
	/* Set ELD valid state */
-
 
7860
	tmp = I915_READ(aud_cntrl_st2);
-
 
7861
	DRM_DEBUG_DRIVER("HDMI audio: pin eld vld status=0x%08x\n", tmp);
-
 
7862
	tmp |= (AUDIO_ELD_VALID_A << (pipe * 4));
-
 
7863
	I915_WRITE(aud_cntrl_st2, tmp);
-
 
7864
	tmp = I915_READ(aud_cntrl_st2);
-
 
7865
	DRM_DEBUG_DRIVER("HDMI audio: eld vld status=0x%08x\n", tmp);
-
 
7866
 
-
 
7867
	/* Enable HDMI mode */
-
 
7868
	tmp = I915_READ(aud_config);
-
 
7869
	DRM_DEBUG_DRIVER("HDMI audio: audio conf: 0x%08x\n", tmp);
-
 
7870
	/* clear N_programing_enable and N_value_index */
-
 
7871
	tmp &= ~(AUD_CONFIG_N_VALUE_INDEX | AUD_CONFIG_N_PROG_ENABLE);
-
 
7872
	I915_WRITE(aud_config, tmp);
-
 
7873
 
-
 
7874
	DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
-
 
7875
 
-
 
7876
	eldv = AUDIO_ELD_VALID_A << (pipe * 4);
-
 
7877
 
-
 
7878
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
-
 
7879
		DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
-
 
7880
		eld[5] |= (1 << 2);	/* Conn_Type, 0x1 = DisplayPort */
-
 
7881
		I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
-
 
7882
	} else {
-
 
7883
		I915_WRITE(aud_config, audio_config_hdmi_pixel_clock(mode));
-
 
7884
	}
-
 
7885
 
-
 
7886
	if (intel_eld_uptodate(connector,
-
 
7887
			       aud_cntrl_st2, eldv,
-
 
7888
			       aud_cntl_st, IBX_ELD_ADDRESS,
-
 
7889
			       hdmiw_hdmiedid))
-
 
7890
		return;
-
 
7891
 
-
 
7892
	i = I915_READ(aud_cntrl_st2);
-
 
7893
	i &= ~eldv;
-
 
7894
	I915_WRITE(aud_cntrl_st2, i);
-
 
7895
 
-
 
7896
	if (!eld[0])
-
 
7897
		return;
-
 
7898
 
-
 
7899
	i = I915_READ(aud_cntl_st);
-
 
7900
	i &= ~IBX_ELD_ADDRESS;
-
 
7901
	I915_WRITE(aud_cntl_st, i);
-
 
7902
	i = (i >> 29) & DIP_PORT_SEL_MASK;		/* DIP_Port_Select, 0x1 = PortB */
-
 
7903
	DRM_DEBUG_DRIVER("port num:%d\n", i);
-
 
7904
 
-
 
7905
	len = min_t(uint8_t, eld[2], 21);	/* 84 bytes of hw ELD buffer */
-
 
7906
	DRM_DEBUG_DRIVER("ELD size %d\n", len);
-
 
7907
	for (i = 0; i < len; i++)
-
 
7908
		I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
-
 
7909
 
-
 
7910
	i = I915_READ(aud_cntrl_st2);
-
 
7911
	i |= eldv;
-
 
7912
	I915_WRITE(aud_cntrl_st2, i);
-
 
7913
 
-
 
7914
}
-
 
7915
 
-
 
7916
static void ironlake_write_eld(struct drm_connector *connector,
-
 
7917
			       struct drm_crtc *crtc,
-
 
7918
			       struct drm_display_mode *mode)
-
 
7919
{
-
 
7920
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
-
 
7921
	uint8_t *eld = connector->eld;
-
 
7922
	uint32_t eldv;
-
 
7923
	uint32_t i;
-
 
7924
	int len;
-
 
7925
	int hdmiw_hdmiedid;
-
 
7926
	int aud_config;
-
 
7927
	int aud_cntl_st;
-
 
7928
	int aud_cntrl_st2;
-
 
7929
	int pipe = to_intel_crtc(crtc)->pipe;
-
 
7930
 
-
 
7931
	if (HAS_PCH_IBX(connector->dev)) {
-
 
7932
		hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe);
-
 
7933
		aud_config = IBX_AUD_CFG(pipe);
-
 
7934
		aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
-
 
7935
		aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
-
 
7936
	} else if (IS_VALLEYVIEW(connector->dev)) {
-
 
7937
		hdmiw_hdmiedid = VLV_HDMIW_HDMIEDID(pipe);
-
 
7938
		aud_config = VLV_AUD_CFG(pipe);
-
 
7939
		aud_cntl_st = VLV_AUD_CNTL_ST(pipe);
-
 
7940
		aud_cntrl_st2 = VLV_AUD_CNTL_ST2;
-
 
7941
	} else {
-
 
7942
		hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe);
-
 
7943
		aud_config = CPT_AUD_CFG(pipe);
-
 
7944
		aud_cntl_st = CPT_AUD_CNTL_ST(pipe);
-
 
7945
		aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
-
 
7946
	}
-
 
7947
 
-
 
7948
	DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
-
 
7949
 
-
 
7950
	if (IS_VALLEYVIEW(connector->dev))  {
-
 
7951
		struct intel_encoder *intel_encoder;
-
 
7952
		struct intel_digital_port *intel_dig_port;
-
 
7953
 
-
 
7954
		intel_encoder = intel_attached_encoder(connector);
-
 
7955
		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
-
 
7956
		i = intel_dig_port->port;
-
 
7957
	} else {
-
 
7958
	i = I915_READ(aud_cntl_st);
-
 
7959
		i = (i >> 29) & DIP_PORT_SEL_MASK;
-
 
7960
		/* DIP_Port_Select, 0x1 = PortB */
-
 
7961
	}
-
 
7962
 
-
 
7963
	if (!i) {
-
 
7964
		DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
-
 
7965
		/* operate blindly on all ports */
-
 
7966
		eldv = IBX_ELD_VALIDB;
-
 
7967
		eldv |= IBX_ELD_VALIDB << 4;
-
 
7968
		eldv |= IBX_ELD_VALIDB << 8;
-
 
7969
	} else {
-
 
7970
		DRM_DEBUG_DRIVER("ELD on port %c\n", port_name(i));
-
 
7971
		eldv = IBX_ELD_VALIDB << ((i - 1) * 4);
-
 
7972
	}
-
 
7973
 
-
 
7974
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
-
 
7975
		DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
-
 
7976
		eld[5] |= (1 << 2);	/* Conn_Type, 0x1 = DisplayPort */
-
 
7977
		I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
-
 
7978
	} else {
-
 
7979
		I915_WRITE(aud_config, audio_config_hdmi_pixel_clock(mode));
-
 
7980
	}
-
 
7981
 
-
 
7982
	if (intel_eld_uptodate(connector,
-
 
7983
			       aud_cntrl_st2, eldv,
-
 
7984
			       aud_cntl_st, IBX_ELD_ADDRESS,
-
 
7985
			       hdmiw_hdmiedid))
-
 
7986
		return;
-
 
7987
 
-
 
7988
	i = I915_READ(aud_cntrl_st2);
-
 
7989
	i &= ~eldv;
-
 
7990
	I915_WRITE(aud_cntrl_st2, i);
-
 
7991
 
-
 
7992
	if (!eld[0])
-
 
7993
		return;
-
 
7994
 
-
 
7995
	i = I915_READ(aud_cntl_st);
-
 
7996
	i &= ~IBX_ELD_ADDRESS;
-
 
7997
	I915_WRITE(aud_cntl_st, i);
-
 
7998
 
-
 
7999
	len = min_t(uint8_t, eld[2], 21);	/* 84 bytes of hw ELD buffer */
-
 
8000
	DRM_DEBUG_DRIVER("ELD size %d\n", len);
-
 
8001
	for (i = 0; i < len; i++)
-
 
8002
		I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
-
 
8003
 
-
 
8004
	i = I915_READ(aud_cntrl_st2);
-
 
8005
	i |= eldv;
-
 
8006
	I915_WRITE(aud_cntrl_st2, i);
-
 
8007
}
-
 
8008
 
-
 
8009
void intel_write_eld(struct drm_encoder *encoder,
-
 
8010
		     struct drm_display_mode *mode)
8183
static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
8011
{
-
 
8012
	struct drm_crtc *crtc = encoder->crtc;
-
 
8013
	struct drm_connector *connector;
8184
{
8014
	struct drm_device *dev = encoder->dev;
8185
	struct drm_device *dev = crtc->dev;
-
 
8186
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
8187
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
 
8188
	uint32_t cntl = 0, size = 0;
8015
	struct drm_i915_private *dev_priv = dev->dev_private;
8189
 
8016
 
8190
	if (base) {
8017
	connector = drm_select_eld(encoder, mode);
8191
		unsigned int width = intel_crtc->cursor_width;
-
 
8192
		unsigned int height = intel_crtc->cursor_height;
-
 
8193
		unsigned int stride = roundup_pow_of_two(width) * 4;
8018
	if (!connector)
8194
 
8019
		return;
8195
		switch (stride) {
8020
 
8196
		default:
8021
	DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
8197
			WARN_ONCE(1, "Invalid cursor width/stride, width=%u, stride=%u\n",
-
 
8198
				  width, stride);
-
 
8199
			stride = 256;
-
 
8200
			/* fallthrough */
-
 
8201
		case 256:
8022
			 connector->base.id,
8202
		case 512:
-
 
8203
		case 1024:
-
 
8204
		case 2048:
-
 
8205
			break;
-
 
8206
	}
8023
			 connector->name,
8207
 
8024
			 connector->encoder->base.id,
8208
		cntl |= CURSOR_ENABLE |
8025
			 connector->encoder->name);
-
 
8026
 
8209
			CURSOR_GAMMA_ENABLE |
8027
	connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;
-
 
8028
 
-
 
8029
	if (dev_priv->display.write_eld)
8210
			CURSOR_FORMAT_ARGB |
8030
		dev_priv->display.write_eld(connector, crtc, mode);
8211
			CURSOR_STRIDE(stride);
8031
}
8212
 
8032
 
-
 
8033
static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
-
 
8034
{
8213
		size = (height << 12) | width;
8035
	struct drm_device *dev = crtc->dev;
8214
	}
8036
	struct drm_i915_private *dev_priv = dev->dev_private;
8215
 
8037
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8216
	if (intel_crtc->cursor_cntl != 0 &&
8038
	uint32_t cntl;
-
 
8039
 
8217
	    (intel_crtc->cursor_base != base ||
8040
	if (base != intel_crtc->cursor_base) {
8218
	     intel_crtc->cursor_size != size ||
8041
		/* On these chipsets we can only modify the base whilst
8219
	     intel_crtc->cursor_cntl != cntl)) {
8042
		 * the cursor is disabled.
8220
		/* On these chipsets we can only modify the base/size/stride
8043
		 */
8221
		 * whilst the cursor is disabled.
8044
		if (intel_crtc->cursor_cntl) {
8222
		 */
8045
			I915_WRITE(_CURACNTR, 0);
8223
			I915_WRITE(_CURACNTR, 0);
8046
			POSTING_READ(_CURACNTR);
8224
			POSTING_READ(_CURACNTR);
8047
			intel_crtc->cursor_cntl = 0;
8225
			intel_crtc->cursor_cntl = 0;
8048
		}
8226
		}
-
 
8227
 
8049
 
8228
	if (intel_crtc->cursor_base != base) {
-
 
8229
		I915_WRITE(_CURABASE, base);
-
 
8230
		intel_crtc->cursor_base = base;
-
 
8231
	}
-
 
8232
 
8050
		I915_WRITE(_CURABASE, base);
8233
	if (intel_crtc->cursor_size != size) {
-
 
8234
		I915_WRITE(CURSIZE, size);
8051
		POSTING_READ(_CURABASE);
8235
		intel_crtc->cursor_size = size;
8052
	}
-
 
8053
 
-
 
8054
		/* XXX width must be 64, stride 256 => 0x00 << 28 */
-
 
8055
	cntl = 0;
-
 
8056
	if (base)
-
 
8057
		cntl = (CURSOR_ENABLE |
-
 
8058
			CURSOR_GAMMA_ENABLE |
8236
	}
8059
			CURSOR_FORMAT_ARGB);
8237
 
8060
	if (intel_crtc->cursor_cntl != cntl) {
8238
	if (intel_crtc->cursor_cntl != cntl) {
8061
	I915_WRITE(_CURACNTR, cntl);
8239
	I915_WRITE(_CURACNTR, cntl);
8062
		POSTING_READ(_CURACNTR);
8240
		POSTING_READ(_CURACNTR);
8063
		intel_crtc->cursor_cntl = cntl;
8241
		intel_crtc->cursor_cntl = cntl;
8064
	}
8242
	}
8065
}
8243
}
8066
 
8244
 
8067
static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
8245
static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
8068
{
8246
{
8069
	struct drm_device *dev = crtc->dev;
8247
	struct drm_device *dev = crtc->dev;
8070
	struct drm_i915_private *dev_priv = dev->dev_private;
8248
	struct drm_i915_private *dev_priv = dev->dev_private;
8071
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8249
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8072
	int pipe = intel_crtc->pipe;
8250
	int pipe = intel_crtc->pipe;
8073
	uint32_t cntl;
8251
	uint32_t cntl;
8074
 
8252
 
8075
	cntl = 0;
8253
	cntl = 0;
8076
		if (base) {
8254
		if (base) {
8077
		cntl = MCURSOR_GAMMA_ENABLE;
8255
		cntl = MCURSOR_GAMMA_ENABLE;
8078
		switch (intel_crtc->cursor_width) {
8256
		switch (intel_crtc->cursor_width) {
8079
			case 64:
8257
			case 64:
8080
				cntl |= CURSOR_MODE_64_ARGB_AX;
8258
				cntl |= CURSOR_MODE_64_ARGB_AX;
8081
				break;
8259
				break;
8082
			case 128:
8260
			case 128:
8083
				cntl |= CURSOR_MODE_128_ARGB_AX;
8261
				cntl |= CURSOR_MODE_128_ARGB_AX;
8084
				break;
8262
				break;
8085
			case 256:
8263
			case 256:
8086
				cntl |= CURSOR_MODE_256_ARGB_AX;
8264
				cntl |= CURSOR_MODE_256_ARGB_AX;
8087
				break;
8265
				break;
8088
			default:
8266
			default:
8089
				WARN_ON(1);
8267
				WARN_ON(1);
8090
				return;
8268
				return;
8091
			}
8269
			}
8092
			cntl |= pipe << 28; /* Connect to correct pipe */
8270
			cntl |= pipe << 28; /* Connect to correct pipe */
8093
		}
-
 
8094
	if (intel_crtc->cursor_cntl != cntl) {
-
 
8095
		I915_WRITE(CURCNTR(pipe), cntl);
-
 
8096
		POSTING_READ(CURCNTR(pipe));
-
 
8097
		intel_crtc->cursor_cntl = cntl;
-
 
8098
	}
-
 
8099
 
-
 
8100
	/* and commit changes on next vblank */
-
 
8101
	I915_WRITE(CURBASE(pipe), base);
-
 
8102
	POSTING_READ(CURBASE(pipe));
-
 
8103
}
-
 
8104
 
-
 
8105
static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
-
 
8106
{
-
 
8107
	struct drm_device *dev = crtc->dev;
-
 
8108
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
8109
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
 
8110
	int pipe = intel_crtc->pipe;
-
 
8111
	uint32_t cntl;
-
 
8112
 
-
 
8113
	cntl = 0;
-
 
8114
		if (base) {
-
 
8115
		cntl = MCURSOR_GAMMA_ENABLE;
-
 
8116
		switch (intel_crtc->cursor_width) {
-
 
8117
			case 64:
-
 
8118
				cntl |= CURSOR_MODE_64_ARGB_AX;
-
 
8119
				break;
-
 
8120
			case 128:
-
 
8121
				cntl |= CURSOR_MODE_128_ARGB_AX;
-
 
8122
				break;
-
 
8123
			case 256:
-
 
8124
				cntl |= CURSOR_MODE_256_ARGB_AX;
-
 
8125
				break;
-
 
8126
			default:
-
 
8127
				WARN_ON(1);
-
 
8128
				return;
-
 
8129
			}
-
 
8130
		}
8271
 
8131
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
8272
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
-
 
8273
			cntl |= CURSOR_PIPE_CSC_ENABLE;
-
 
8274
	}
-
 
8275
 
-
 
8276
	if (to_intel_plane(crtc->cursor)->rotation == BIT(DRM_ROTATE_180))
8132
			cntl |= CURSOR_PIPE_CSC_ENABLE;
8277
		cntl |= CURSOR_ROTATE_180;
8133
 
8278
 
8134
	if (intel_crtc->cursor_cntl != cntl) {
8279
	if (intel_crtc->cursor_cntl != cntl) {
8135
		I915_WRITE(CURCNTR(pipe), cntl);
8280
		I915_WRITE(CURCNTR(pipe), cntl);
8136
		POSTING_READ(CURCNTR(pipe));
8281
		POSTING_READ(CURCNTR(pipe));
8137
		intel_crtc->cursor_cntl = cntl;
8282
		intel_crtc->cursor_cntl = cntl;
8138
		}
8283
		}
8139
 
8284
 
8140
	/* and commit changes on next vblank */
8285
	/* and commit changes on next vblank */
8141
	I915_WRITE(CURBASE(pipe), base);
8286
	I915_WRITE(CURBASE(pipe), base);
8142
	POSTING_READ(CURBASE(pipe));
8287
	POSTING_READ(CURBASE(pipe));
-
 
8288
 
-
 
8289
	intel_crtc->cursor_base = base;
8143
}
8290
}
8144
 
8291
 
8145
/* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */
8292
/* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */
8146
void intel_crtc_update_cursor(struct drm_crtc *crtc,
8293
void intel_crtc_update_cursor(struct drm_crtc *crtc,
8147
				     bool on)
8294
				     bool on)
8148
{
8295
{
8149
	struct drm_device *dev = crtc->dev;
8296
	struct drm_device *dev = crtc->dev;
8150
	struct drm_i915_private *dev_priv = dev->dev_private;
8297
	struct drm_i915_private *dev_priv = dev->dev_private;
8151
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8298
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8152
	int pipe = intel_crtc->pipe;
8299
	int pipe = intel_crtc->pipe;
8153
	int x = crtc->cursor_x;
8300
	int x = crtc->cursor_x;
8154
	int y = crtc->cursor_y;
8301
	int y = crtc->cursor_y;
8155
	u32 base = 0, pos = 0;
8302
	u32 base = 0, pos = 0;
8156
 
8303
 
8157
	if (on)
8304
	if (on)
8158
		base = intel_crtc->cursor_addr;
8305
		base = intel_crtc->cursor_addr;
8159
 
8306
 
8160
	if (x >= intel_crtc->config.pipe_src_w)
8307
	if (x >= intel_crtc->config.pipe_src_w)
8161
			base = 0;
8308
			base = 0;
8162
 
8309
 
8163
	if (y >= intel_crtc->config.pipe_src_h)
8310
	if (y >= intel_crtc->config.pipe_src_h)
8164
		base = 0;
8311
		base = 0;
8165
 
8312
 
8166
	if (x < 0) {
8313
	if (x < 0) {
8167
		if (x + intel_crtc->cursor_width <= 0)
8314
		if (x + intel_crtc->cursor_width <= 0)
8168
			base = 0;
8315
			base = 0;
8169
 
8316
 
8170
		pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
8317
		pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
8171
		x = -x;
8318
		x = -x;
8172
	}
8319
	}
8173
	pos |= x << CURSOR_X_SHIFT;
8320
	pos |= x << CURSOR_X_SHIFT;
8174
 
8321
 
8175
	if (y < 0) {
8322
	if (y < 0) {
8176
		if (y + intel_crtc->cursor_height <= 0)
8323
		if (y + intel_crtc->cursor_height <= 0)
8177
			base = 0;
8324
			base = 0;
8178
 
8325
 
8179
		pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
8326
		pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
8180
		y = -y;
8327
		y = -y;
8181
	}
8328
	}
8182
	pos |= y << CURSOR_Y_SHIFT;
8329
	pos |= y << CURSOR_Y_SHIFT;
8183
 
8330
 
8184
	if (base == 0 && intel_crtc->cursor_base == 0)
8331
	if (base == 0 && intel_crtc->cursor_base == 0)
8185
		return;
8332
		return;
8186
 
8333
 
8187
	I915_WRITE(CURPOS(pipe), pos);
8334
	I915_WRITE(CURPOS(pipe), pos);
-
 
8335
 
-
 
8336
	/* ILK+ do this automagically */
8188
 
8337
	if (HAS_GMCH_DISPLAY(dev) &&
-
 
8338
		to_intel_plane(crtc->cursor)->rotation == BIT(DRM_ROTATE_180)) {
8189
	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev))
8339
		base += (intel_crtc->cursor_height *
-
 
8340
			intel_crtc->cursor_width - 1) * 4;
-
 
8341
	}
8190
		ivb_update_cursor(crtc, base);
8342
 
8191
	else if (IS_845G(dev) || IS_I865G(dev))
8343
	if (IS_845G(dev) || IS_I865G(dev))
8192
		i845_update_cursor(crtc, base);
8344
		i845_update_cursor(crtc, base);
8193
	else
8345
	else
8194
		i9xx_update_cursor(crtc, base);
8346
		i9xx_update_cursor(crtc, base);
8195
	intel_crtc->cursor_base = base;
-
 
8196
}
8347
}
-
 
8348
 
-
 
8349
static bool cursor_size_ok(struct drm_device *dev,
-
 
8350
			   uint32_t width, uint32_t height)
-
 
8351
{
-
 
8352
	if (width == 0 || height == 0)
-
 
8353
		return false;
8197
 
8354
 
8198
/*
8355
	/*
8199
 * intel_crtc_cursor_set_obj - Set cursor to specified GEM object
-
 
8200
 *
8356
	 * 845g/865g are special in that they are only limited by
8201
 * Note that the object's reference will be consumed if the update fails.  If
8357
	 * the width of their cursors, the height is arbitrary up to
8202
 * the update succeeds, the reference of the old object (if any) will be
8358
	 * the precision of the register. Everything else requires
8203
 * consumed.
8359
	 * square cursors, limited to a few power-of-two sizes.
-
 
8360
 */
-
 
8361
	if (IS_845G(dev) || IS_I865G(dev)) {
-
 
8362
		if ((width & 63) != 0)
-
 
8363
			return false;
-
 
8364
 
-
 
8365
		if (width > (IS_845G(dev) ? 64 : 512))
-
 
8366
			return false;
-
 
8367
 
-
 
8368
		if (height > 1023)
-
 
8369
			return false;
-
 
8370
	} else {
-
 
8371
		switch (width | height) {
-
 
8372
		case 256:
-
 
8373
		case 128:
-
 
8374
			if (IS_GEN2(dev))
-
 
8375
				return false;
-
 
8376
		case 64:
-
 
8377
			break;
-
 
8378
		default:
-
 
8379
			return false;
-
 
8380
		}
-
 
8381
	}
-
 
8382
 
-
 
8383
	return true;
-
 
8384
}
8204
 */
8385
 
8205
static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc,
8386
static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc,
8206
				     struct drm_i915_gem_object *obj,
8387
				     struct drm_i915_gem_object *obj,
8207
				 uint32_t width, uint32_t height)
8388
				 uint32_t width, uint32_t height)
8208
{
8389
{
8209
	struct drm_device *dev = crtc->dev;
8390
	struct drm_device *dev = crtc->dev;
8210
	struct drm_i915_private *dev_priv = dev->dev_private;
8391
	struct drm_i915_private *dev_priv = to_i915(dev);
8211
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8392
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8212
	enum pipe pipe = intel_crtc->pipe;
8393
	enum pipe pipe = intel_crtc->pipe;
8213
	unsigned old_width;
8394
	unsigned old_width;
8214
	uint32_t addr;
8395
	uint32_t addr;
8215
	int ret;
8396
	int ret;
8216
 
8397
 
8217
	/* if we want to turn off the cursor ignore width and height */
8398
	/* if we want to turn off the cursor ignore width and height */
8218
	if (!obj) {
8399
	if (!obj) {
8219
		DRM_DEBUG_KMS("cursor off\n");
8400
		DRM_DEBUG_KMS("cursor off\n");
8220
		addr = 0;
8401
		addr = 0;
8221
		obj = NULL;
-
 
8222
		mutex_lock(&dev->struct_mutex);
8402
		mutex_lock(&dev->struct_mutex);
8223
		goto finish;
8403
		goto finish;
8224
	}
8404
	}
8225
 
-
 
8226
	/* Check for which cursor types we support */
-
 
8227
	if (!((width == 64 && height == 64) ||
-
 
8228
			(width == 128 && height == 128 && !IS_GEN2(dev)) ||
-
 
8229
			(width == 256 && height == 256 && !IS_GEN2(dev)))) {
-
 
8230
		DRM_DEBUG("Cursor dimension not supported\n");
-
 
8231
		return -EINVAL;
-
 
8232
	}
-
 
8233
 
-
 
8234
	if (obj->base.size < width * height * 4) {
-
 
8235
		DRM_DEBUG_KMS("buffer is too small\n");
-
 
8236
		ret = -ENOMEM;
-
 
8237
		goto fail;
-
 
8238
	}
-
 
8239
 
8405
 
8240
	/* we only need to pin inside GTT if cursor is non-phy */
8406
	/* we only need to pin inside GTT if cursor is non-phy */
8241
	mutex_lock(&dev->struct_mutex);
8407
	mutex_lock(&dev->struct_mutex);
8242
	if (!INTEL_INFO(dev)->cursor_needs_physical) {
8408
	if (!INTEL_INFO(dev)->cursor_needs_physical) {
8243
		unsigned alignment;
8409
		unsigned alignment;
8244
 
-
 
8245
		if (obj->tiling_mode) {
-
 
8246
			DRM_DEBUG_KMS("cursor cannot be tiled\n");
-
 
8247
			ret = -EINVAL;
-
 
8248
			goto fail_locked;
-
 
8249
		}
-
 
8250
 
8410
 
8251
		/*
8411
		/*
8252
		 * Global gtt pte registers are special registers which actually
8412
		 * Global gtt pte registers are special registers which actually
8253
		 * forward writes to a chunk of system memory. Which means that
8413
		 * forward writes to a chunk of system memory. Which means that
8254
		 * there is no risk that the register values disappear as soon
8414
		 * there is no risk that the register values disappear as soon
8255
		 * as we call intel_runtime_pm_put(), so it is correct to wrap
8415
		 * as we call intel_runtime_pm_put(), so it is correct to wrap
8256
		 * only the pin/unpin/fence and not more.
8416
		 * only the pin/unpin/fence and not more.
8257
		 */
8417
		 */
8258
		intel_runtime_pm_get(dev_priv);
8418
		intel_runtime_pm_get(dev_priv);
8259
 
8419
 
8260
		/* Note that the w/a also requires 2 PTE of padding following
8420
		/* Note that the w/a also requires 2 PTE of padding following
8261
		 * the bo. We currently fill all unused PTE with the shadow
8421
		 * the bo. We currently fill all unused PTE with the shadow
8262
		 * page and so we should always have valid PTE following the
8422
		 * page and so we should always have valid PTE following the
8263
		 * cursor preventing the VT-d warning.
8423
		 * cursor preventing the VT-d warning.
8264
		 */
8424
		 */
8265
		alignment = 0;
8425
		alignment = 0;
8266
		if (need_vtd_wa(dev))
8426
		if (need_vtd_wa(dev))
8267
			alignment = 64*1024;
8427
			alignment = 64*1024;
8268
 
8428
 
8269
		ret = i915_gem_object_pin_to_display_plane(obj, alignment, NULL);
8429
		ret = i915_gem_object_pin_to_display_plane(obj, alignment, NULL);
8270
		if (ret) {
8430
		if (ret) {
8271
			DRM_DEBUG_KMS("failed to move cursor bo into the GTT\n");
8431
			DRM_DEBUG_KMS("failed to move cursor bo into the GTT\n");
8272
			intel_runtime_pm_put(dev_priv);
8432
			intel_runtime_pm_put(dev_priv);
8273
			goto fail_locked;
8433
			goto fail_locked;
8274
		}
8434
		}
8275
 
8435
 
8276
		ret = i915_gem_object_put_fence(obj);
8436
		ret = i915_gem_object_put_fence(obj);
8277
		if (ret) {
8437
		if (ret) {
8278
			DRM_DEBUG_KMS("failed to release fence for cursor");
8438
			DRM_DEBUG_KMS("failed to release fence for cursor");
8279
			intel_runtime_pm_put(dev_priv);
8439
			intel_runtime_pm_put(dev_priv);
8280
			goto fail_unpin;
8440
			goto fail_unpin;
8281
		}
8441
		}
8282
 
8442
 
8283
		addr = i915_gem_obj_ggtt_offset(obj);
8443
		addr = i915_gem_obj_ggtt_offset(obj);
8284
 
8444
 
8285
		intel_runtime_pm_put(dev_priv);
8445
		intel_runtime_pm_put(dev_priv);
8286
	} else {
8446
	} else {
8287
		int align = IS_I830(dev) ? 16 * 1024 : 256;
8447
       int align = IS_I830(dev) ? 16 * 1024 : 256;
8288
//		ret = i915_gem_object_attach_phys(obj, align);
8448
       ret = 1;//i915_gem_object_attach_phys(obj, align);
8289
//		if (ret) {
8449
       if (ret) {
8290
//			DRM_DEBUG_KMS("failed to attach phys object\n");
8450
           DRM_DEBUG_KMS("failed to attach phys object\n");
8291
//			goto fail_locked;
8451
           goto fail_locked;
8292
//		}
8452
       }
8293
//		addr = obj->phys_handle->busaddr;
8453
       addr = obj->phys_handle->busaddr;
8294
	}
8454
	}
8295
 
-
 
8296
	if (IS_GEN2(dev))
-
 
8297
		I915_WRITE(CURSIZE, (height << 12) | width);
-
 
8298
 
8455
 
8299
 finish:
8456
 finish:
8300
	if (intel_crtc->cursor_bo) {
8457
	if (intel_crtc->cursor_bo) {
8301
		if (!INTEL_INFO(dev)->cursor_needs_physical)
8458
		if (!INTEL_INFO(dev)->cursor_needs_physical)
8302
			i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo);
8459
			i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo);
8303
	}
8460
	}
8304
 
8461
 
8305
	i915_gem_track_fb(intel_crtc->cursor_bo, obj,
8462
	i915_gem_track_fb(intel_crtc->cursor_bo, obj,
8306
			  INTEL_FRONTBUFFER_CURSOR(pipe));
8463
			  INTEL_FRONTBUFFER_CURSOR(pipe));
8307
	mutex_unlock(&dev->struct_mutex);
8464
	mutex_unlock(&dev->struct_mutex);
8308
 
8465
 
8309
	old_width = intel_crtc->cursor_width;
8466
	old_width = intel_crtc->cursor_width;
8310
 
8467
 
8311
	intel_crtc->cursor_addr = addr;
8468
	intel_crtc->cursor_addr = addr;
8312
	intel_crtc->cursor_bo = obj;
8469
	intel_crtc->cursor_bo = obj;
8313
	intel_crtc->cursor_width = width;
8470
	intel_crtc->cursor_width = width;
8314
	intel_crtc->cursor_height = height;
8471
	intel_crtc->cursor_height = height;
8315
 
8472
 
8316
	if (intel_crtc->active) {
8473
	if (intel_crtc->active) {
8317
		if (old_width != width)
8474
		if (old_width != width)
8318
			intel_update_watermarks(crtc);
8475
			intel_update_watermarks(crtc);
8319
		intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
8476
		intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
-
 
8477
 
-
 
8478
		intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_CURSOR(pipe));
8320
	}
8479
	}
8321
 
8480
 
8322
	return 0;
8481
	return 0;
8323
fail_unpin:
8482
fail_unpin:
8324
	i915_gem_object_unpin_from_display_plane(obj);
8483
	i915_gem_object_unpin_from_display_plane(obj);
8325
fail_locked:
8484
fail_locked:
8326
	mutex_unlock(&dev->struct_mutex);
8485
	mutex_unlock(&dev->struct_mutex);
8327
fail:
-
 
8328
	drm_gem_object_unreference_unlocked(&obj->base);
-
 
8329
	return ret;
8486
	return ret;
8330
}
8487
}
8331
 
8488
 
8332
static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
8489
static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
8333
				 u16 *blue, uint32_t start, uint32_t size)
8490
				 u16 *blue, uint32_t start, uint32_t size)
8334
{
8491
{
8335
	int end = (start + size > 256) ? 256 : start + size, i;
8492
	int end = (start + size > 256) ? 256 : start + size, i;
8336
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8493
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8337
 
8494
 
8338
	for (i = start; i < end; i++) {
8495
	for (i = start; i < end; i++) {
8339
		intel_crtc->lut_r[i] = red[i] >> 8;
8496
		intel_crtc->lut_r[i] = red[i] >> 8;
8340
		intel_crtc->lut_g[i] = green[i] >> 8;
8497
		intel_crtc->lut_g[i] = green[i] >> 8;
8341
		intel_crtc->lut_b[i] = blue[i] >> 8;
8498
		intel_crtc->lut_b[i] = blue[i] >> 8;
8342
	}
8499
	}
8343
 
8500
 
8344
	intel_crtc_load_lut(crtc);
8501
	intel_crtc_load_lut(crtc);
8345
}
8502
}
8346
 
8503
 
8347
/* VESA 640x480x72Hz mode to set on the pipe */
8504
/* VESA 640x480x72Hz mode to set on the pipe */
8348
static struct drm_display_mode load_detect_mode = {
8505
static struct drm_display_mode load_detect_mode = {
8349
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
8506
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
8350
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
8507
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
8351
};
8508
};
8352
 
8509
 
8353
struct drm_framebuffer *
8510
struct drm_framebuffer *
8354
__intel_framebuffer_create(struct drm_device *dev,
8511
__intel_framebuffer_create(struct drm_device *dev,
8355
			 struct drm_mode_fb_cmd2 *mode_cmd,
8512
			 struct drm_mode_fb_cmd2 *mode_cmd,
8356
			 struct drm_i915_gem_object *obj)
8513
			 struct drm_i915_gem_object *obj)
8357
{
8514
{
8358
	struct intel_framebuffer *intel_fb;
8515
	struct intel_framebuffer *intel_fb;
8359
	int ret;
8516
	int ret;
8360
 
8517
 
8361
	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
8518
	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
8362
	if (!intel_fb) {
8519
	if (!intel_fb) {
8363
		drm_gem_object_unreference_unlocked(&obj->base);
8520
		drm_gem_object_unreference(&obj->base);
8364
		return ERR_PTR(-ENOMEM);
8521
		return ERR_PTR(-ENOMEM);
8365
	}
8522
	}
8366
 
8523
 
8367
	ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
8524
	ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
8368
	if (ret)
8525
	if (ret)
8369
		goto err;
8526
		goto err;
8370
 
8527
 
8371
	return &intel_fb->base;
8528
	return &intel_fb->base;
8372
err:
8529
err:
8373
		drm_gem_object_unreference_unlocked(&obj->base);
8530
	drm_gem_object_unreference(&obj->base);
8374
		kfree(intel_fb);
8531
		kfree(intel_fb);
8375
 
8532
 
8376
		return ERR_PTR(ret);
8533
		return ERR_PTR(ret);
8377
}
8534
}
8378
 
8535
 
8379
static struct drm_framebuffer *
8536
static struct drm_framebuffer *
8380
intel_framebuffer_create(struct drm_device *dev,
8537
intel_framebuffer_create(struct drm_device *dev,
8381
			 struct drm_mode_fb_cmd2 *mode_cmd,
8538
			 struct drm_mode_fb_cmd2 *mode_cmd,
8382
			 struct drm_i915_gem_object *obj)
8539
			 struct drm_i915_gem_object *obj)
8383
{
8540
{
8384
	struct drm_framebuffer *fb;
8541
	struct drm_framebuffer *fb;
8385
	int ret;
8542
	int ret;
8386
 
8543
 
8387
	ret = i915_mutex_lock_interruptible(dev);
8544
	ret = i915_mutex_lock_interruptible(dev);
8388
	if (ret)
8545
	if (ret)
8389
		return ERR_PTR(ret);
8546
		return ERR_PTR(ret);
8390
	fb = __intel_framebuffer_create(dev, mode_cmd, obj);
8547
	fb = __intel_framebuffer_create(dev, mode_cmd, obj);
8391
	mutex_unlock(&dev->struct_mutex);
8548
	mutex_unlock(&dev->struct_mutex);
8392
 
8549
 
8393
	return fb;
8550
	return fb;
8394
}
8551
}
8395
 
8552
 
8396
static u32
8553
static u32
8397
intel_framebuffer_pitch_for_width(int width, int bpp)
8554
intel_framebuffer_pitch_for_width(int width, int bpp)
8398
{
8555
{
8399
	u32 pitch = DIV_ROUND_UP(width * bpp, 8);
8556
	u32 pitch = DIV_ROUND_UP(width * bpp, 8);
8400
	return ALIGN(pitch, 64);
8557
	return ALIGN(pitch, 64);
8401
}
8558
}
8402
 
8559
 
8403
static u32
8560
static u32
8404
intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
8561
intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
8405
{
8562
{
8406
	u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
8563
	u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
8407
	return PAGE_ALIGN(pitch * mode->vdisplay);
8564
	return PAGE_ALIGN(pitch * mode->vdisplay);
8408
}
8565
}
8409
 
8566
 
8410
static struct drm_framebuffer *
8567
static struct drm_framebuffer *
8411
intel_framebuffer_create_for_mode(struct drm_device *dev,
8568
intel_framebuffer_create_for_mode(struct drm_device *dev,
8412
				  struct drm_display_mode *mode,
8569
				  struct drm_display_mode *mode,
8413
				  int depth, int bpp)
8570
				  int depth, int bpp)
8414
{
8571
{
8415
	struct drm_i915_gem_object *obj;
8572
	struct drm_i915_gem_object *obj;
8416
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
8573
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
8417
 
8574
 
8418
	obj = i915_gem_alloc_object(dev,
8575
	obj = i915_gem_alloc_object(dev,
8419
				    intel_framebuffer_size_for_mode(mode, bpp));
8576
				    intel_framebuffer_size_for_mode(mode, bpp));
8420
	if (obj == NULL)
8577
	if (obj == NULL)
8421
		return ERR_PTR(-ENOMEM);
8578
		return ERR_PTR(-ENOMEM);
8422
 
8579
 
8423
	mode_cmd.width = mode->hdisplay;
8580
	mode_cmd.width = mode->hdisplay;
8424
	mode_cmd.height = mode->vdisplay;
8581
	mode_cmd.height = mode->vdisplay;
8425
	mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
8582
	mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
8426
								bpp);
8583
								bpp);
8427
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
8584
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
8428
 
8585
 
8429
	return intel_framebuffer_create(dev, &mode_cmd, obj);
8586
	return intel_framebuffer_create(dev, &mode_cmd, obj);
8430
}
8587
}
8431
 
8588
 
8432
static struct drm_framebuffer *
8589
static struct drm_framebuffer *
8433
mode_fits_in_fbdev(struct drm_device *dev,
8590
mode_fits_in_fbdev(struct drm_device *dev,
8434
		   struct drm_display_mode *mode)
8591
		   struct drm_display_mode *mode)
8435
{
8592
{
8436
#ifdef CONFIG_DRM_I915_FBDEV
8593
#ifdef CONFIG_DRM_I915_FBDEV
8437
	struct drm_i915_private *dev_priv = dev->dev_private;
8594
	struct drm_i915_private *dev_priv = dev->dev_private;
8438
	struct drm_i915_gem_object *obj;
8595
	struct drm_i915_gem_object *obj;
8439
	struct drm_framebuffer *fb;
8596
	struct drm_framebuffer *fb;
8440
 
8597
 
8441
	if (!dev_priv->fbdev)
8598
	if (!dev_priv->fbdev)
8442
		return NULL;
8599
		return NULL;
8443
 
8600
 
8444
	if (!dev_priv->fbdev->fb)
8601
	if (!dev_priv->fbdev->fb)
8445
		return NULL;
8602
		return NULL;
8446
 
8603
 
8447
	obj = dev_priv->fbdev->fb->obj;
8604
	obj = dev_priv->fbdev->fb->obj;
8448
	BUG_ON(!obj);
8605
	BUG_ON(!obj);
8449
 
8606
 
8450
	fb = &dev_priv->fbdev->fb->base;
8607
	fb = &dev_priv->fbdev->fb->base;
8451
	if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
8608
	if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
8452
							       fb->bits_per_pixel))
8609
							       fb->bits_per_pixel))
8453
		return NULL;
8610
		return NULL;
8454
 
8611
 
8455
	if (obj->base.size < mode->vdisplay * fb->pitches[0])
8612
	if (obj->base.size < mode->vdisplay * fb->pitches[0])
8456
		return NULL;
8613
		return NULL;
8457
 
8614
 
8458
	return fb;
8615
	return fb;
8459
#else
8616
#else
8460
	return NULL;
8617
	return NULL;
8461
#endif
8618
#endif
8462
}
8619
}
8463
 
8620
 
8464
bool intel_get_load_detect_pipe(struct drm_connector *connector,
8621
bool intel_get_load_detect_pipe(struct drm_connector *connector,
8465
				struct drm_display_mode *mode,
8622
				struct drm_display_mode *mode,
8466
				struct intel_load_detect_pipe *old,
8623
				struct intel_load_detect_pipe *old,
8467
				struct drm_modeset_acquire_ctx *ctx)
8624
				struct drm_modeset_acquire_ctx *ctx)
8468
{
8625
{
8469
	struct intel_crtc *intel_crtc;
8626
	struct intel_crtc *intel_crtc;
8470
	struct intel_encoder *intel_encoder =
8627
	struct intel_encoder *intel_encoder =
8471
		intel_attached_encoder(connector);
8628
		intel_attached_encoder(connector);
8472
	struct drm_crtc *possible_crtc;
8629
	struct drm_crtc *possible_crtc;
8473
	struct drm_encoder *encoder = &intel_encoder->base;
8630
	struct drm_encoder *encoder = &intel_encoder->base;
8474
	struct drm_crtc *crtc = NULL;
8631
	struct drm_crtc *crtc = NULL;
8475
	struct drm_device *dev = encoder->dev;
8632
	struct drm_device *dev = encoder->dev;
8476
	struct drm_framebuffer *fb;
8633
	struct drm_framebuffer *fb;
8477
	struct drm_mode_config *config = &dev->mode_config;
8634
	struct drm_mode_config *config = &dev->mode_config;
8478
	int ret, i = -1;
8635
	int ret, i = -1;
8479
 
8636
 
8480
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
8637
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
8481
		      connector->base.id, connector->name,
8638
		      connector->base.id, connector->name,
8482
		      encoder->base.id, encoder->name);
8639
		      encoder->base.id, encoder->name);
8483
 
8640
 
8484
retry:
8641
retry:
8485
	ret = drm_modeset_lock(&config->connection_mutex, ctx);
8642
	ret = drm_modeset_lock(&config->connection_mutex, ctx);
8486
	if (ret)
8643
	if (ret)
8487
		goto fail_unlock;
8644
		goto fail_unlock;
8488
 
8645
 
8489
	/*
8646
	/*
8490
	 * Algorithm gets a little messy:
8647
	 * Algorithm gets a little messy:
8491
	 *
8648
	 *
8492
	 *   - if the connector already has an assigned crtc, use it (but make
8649
	 *   - if the connector already has an assigned crtc, use it (but make
8493
	 *     sure it's on first)
8650
	 *     sure it's on first)
8494
	 *
8651
	 *
8495
	 *   - try to find the first unused crtc that can drive this connector,
8652
	 *   - try to find the first unused crtc that can drive this connector,
8496
	 *     and use that if we find one
8653
	 *     and use that if we find one
8497
	 */
8654
	 */
8498
 
8655
 
8499
	/* See if we already have a CRTC for this connector */
8656
	/* See if we already have a CRTC for this connector */
8500
	if (encoder->crtc) {
8657
	if (encoder->crtc) {
8501
		crtc = encoder->crtc;
8658
		crtc = encoder->crtc;
8502
 
8659
 
8503
		ret = drm_modeset_lock(&crtc->mutex, ctx);
8660
		ret = drm_modeset_lock(&crtc->mutex, ctx);
8504
		if (ret)
8661
		if (ret)
8505
			goto fail_unlock;
8662
			goto fail_unlock;
-
 
8663
		ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
-
 
8664
		if (ret)
-
 
8665
			goto fail_unlock;
8506
 
8666
 
8507
		old->dpms_mode = connector->dpms;
8667
		old->dpms_mode = connector->dpms;
8508
		old->load_detect_temp = false;
8668
		old->load_detect_temp = false;
8509
 
8669
 
8510
		/* Make sure the crtc and connector are running */
8670
		/* Make sure the crtc and connector are running */
8511
		if (connector->dpms != DRM_MODE_DPMS_ON)
8671
		if (connector->dpms != DRM_MODE_DPMS_ON)
8512
			connector->funcs->dpms(connector, DRM_MODE_DPMS_ON);
8672
			connector->funcs->dpms(connector, DRM_MODE_DPMS_ON);
8513
 
8673
 
8514
		return true;
8674
		return true;
8515
	}
8675
	}
8516
 
8676
 
8517
	/* Find an unused one (if possible) */
8677
	/* Find an unused one (if possible) */
8518
	for_each_crtc(dev, possible_crtc) {
8678
	for_each_crtc(dev, possible_crtc) {
8519
		i++;
8679
		i++;
8520
		if (!(encoder->possible_crtcs & (1 << i)))
8680
		if (!(encoder->possible_crtcs & (1 << i)))
8521
			continue;
8681
			continue;
8522
		if (possible_crtc->enabled)
8682
		if (possible_crtc->enabled)
8523
			continue;
8683
			continue;
8524
		/* This can occur when applying the pipe A quirk on resume. */
8684
		/* This can occur when applying the pipe A quirk on resume. */
8525
		if (to_intel_crtc(possible_crtc)->new_enabled)
8685
		if (to_intel_crtc(possible_crtc)->new_enabled)
8526
			continue;
8686
			continue;
8527
 
8687
 
8528
		crtc = possible_crtc;
8688
		crtc = possible_crtc;
8529
		break;
8689
		break;
8530
	}
8690
	}
8531
 
8691
 
8532
	/*
8692
	/*
8533
	 * If we didn't find an unused CRTC, don't use any.
8693
	 * If we didn't find an unused CRTC, don't use any.
8534
	 */
8694
	 */
8535
	if (!crtc) {
8695
	if (!crtc) {
8536
		DRM_DEBUG_KMS("no pipe available for load-detect\n");
8696
		DRM_DEBUG_KMS("no pipe available for load-detect\n");
8537
		goto fail_unlock;
8697
		goto fail_unlock;
8538
	}
8698
	}
8539
 
8699
 
8540
	ret = drm_modeset_lock(&crtc->mutex, ctx);
8700
	ret = drm_modeset_lock(&crtc->mutex, ctx);
8541
	if (ret)
8701
	if (ret)
8542
		goto fail_unlock;
8702
		goto fail_unlock;
-
 
8703
	ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
-
 
8704
	if (ret)
-
 
8705
		goto fail_unlock;
8543
	intel_encoder->new_crtc = to_intel_crtc(crtc);
8706
	intel_encoder->new_crtc = to_intel_crtc(crtc);
8544
	to_intel_connector(connector)->new_encoder = intel_encoder;
8707
	to_intel_connector(connector)->new_encoder = intel_encoder;
8545
 
8708
 
8546
	intel_crtc = to_intel_crtc(crtc);
8709
	intel_crtc = to_intel_crtc(crtc);
8547
	intel_crtc->new_enabled = true;
8710
	intel_crtc->new_enabled = true;
8548
	intel_crtc->new_config = &intel_crtc->config;
8711
	intel_crtc->new_config = &intel_crtc->config;
8549
	old->dpms_mode = connector->dpms;
8712
	old->dpms_mode = connector->dpms;
8550
	old->load_detect_temp = true;
8713
	old->load_detect_temp = true;
8551
	old->release_fb = NULL;
8714
	old->release_fb = NULL;
8552
 
8715
 
8553
	if (!mode)
8716
	if (!mode)
8554
		mode = &load_detect_mode;
8717
		mode = &load_detect_mode;
8555
 
8718
 
8556
	/* We need a framebuffer large enough to accommodate all accesses
8719
	/* We need a framebuffer large enough to accommodate all accesses
8557
	 * that the plane may generate whilst we perform load detection.
8720
	 * that the plane may generate whilst we perform load detection.
8558
	 * We cannot rely on the fbcon either being present (we get called
8721
	 * We cannot rely on the fbcon either being present (we get called
8559
	 * during its initialisation to detect all boot displays, or it may
8722
	 * during its initialisation to detect all boot displays, or it may
8560
	 * not even exist) or being large enough to satisfy the
8723
	 * not even exist) or being large enough to satisfy the
8561
	 * requested mode.
8724
	 * requested mode.
8562
	 */
8725
	 */
8563
	fb = mode_fits_in_fbdev(dev, mode);
8726
	fb = mode_fits_in_fbdev(dev, mode);
8564
	if (fb == NULL) {
8727
	if (fb == NULL) {
8565
		DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
8728
		DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
8566
		fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
8729
		fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
8567
		old->release_fb = fb;
8730
		old->release_fb = fb;
8568
	} else
8731
	} else
8569
		DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
8732
		DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
8570
	if (IS_ERR(fb)) {
8733
	if (IS_ERR(fb)) {
8571
		DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
8734
		DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
8572
		goto fail;
8735
		goto fail;
8573
	}
8736
	}
8574
 
8737
 
8575
	if (intel_set_mode(crtc, mode, 0, 0, fb)) {
8738
	if (intel_set_mode(crtc, mode, 0, 0, fb)) {
8576
		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
8739
		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
8577
		if (old->release_fb)
8740
		if (old->release_fb)
8578
			old->release_fb->funcs->destroy(old->release_fb);
8741
			old->release_fb->funcs->destroy(old->release_fb);
8579
		goto fail;
8742
		goto fail;
8580
	}
8743
	}
8581
 
8744
 
8582
	/* let the connector get through one full cycle before testing */
8745
	/* let the connector get through one full cycle before testing */
8583
	intel_wait_for_vblank(dev, intel_crtc->pipe);
8746
	intel_wait_for_vblank(dev, intel_crtc->pipe);
8584
	return true;
8747
	return true;
8585
 
8748
 
8586
 fail:
8749
 fail:
8587
	intel_crtc->new_enabled = crtc->enabled;
8750
	intel_crtc->new_enabled = crtc->enabled;
8588
	if (intel_crtc->new_enabled)
8751
	if (intel_crtc->new_enabled)
8589
		intel_crtc->new_config = &intel_crtc->config;
8752
		intel_crtc->new_config = &intel_crtc->config;
8590
	else
8753
	else
8591
		intel_crtc->new_config = NULL;
8754
		intel_crtc->new_config = NULL;
8592
fail_unlock:
8755
fail_unlock:
8593
	if (ret == -EDEADLK) {
8756
	if (ret == -EDEADLK) {
8594
		drm_modeset_backoff(ctx);
8757
		drm_modeset_backoff(ctx);
8595
		goto retry;
8758
		goto retry;
8596
	}
8759
	}
8597
 
8760
 
8598
	return false;
8761
	return false;
8599
}
8762
}
8600
 
8763
 
8601
void intel_release_load_detect_pipe(struct drm_connector *connector,
8764
void intel_release_load_detect_pipe(struct drm_connector *connector,
8602
				    struct intel_load_detect_pipe *old)
8765
				    struct intel_load_detect_pipe *old)
8603
{
8766
{
8604
	struct intel_encoder *intel_encoder =
8767
	struct intel_encoder *intel_encoder =
8605
		intel_attached_encoder(connector);
8768
		intel_attached_encoder(connector);
8606
	struct drm_encoder *encoder = &intel_encoder->base;
8769
	struct drm_encoder *encoder = &intel_encoder->base;
8607
	struct drm_crtc *crtc = encoder->crtc;
8770
	struct drm_crtc *crtc = encoder->crtc;
8608
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8771
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8609
 
8772
 
8610
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
8773
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
8611
		      connector->base.id, connector->name,
8774
		      connector->base.id, connector->name,
8612
		      encoder->base.id, encoder->name);
8775
		      encoder->base.id, encoder->name);
8613
 
8776
 
8614
	if (old->load_detect_temp) {
8777
	if (old->load_detect_temp) {
8615
		to_intel_connector(connector)->new_encoder = NULL;
8778
		to_intel_connector(connector)->new_encoder = NULL;
8616
		intel_encoder->new_crtc = NULL;
8779
		intel_encoder->new_crtc = NULL;
8617
		intel_crtc->new_enabled = false;
8780
		intel_crtc->new_enabled = false;
8618
		intel_crtc->new_config = NULL;
8781
		intel_crtc->new_config = NULL;
8619
		intel_set_mode(crtc, NULL, 0, 0, NULL);
8782
		intel_set_mode(crtc, NULL, 0, 0, NULL);
8620
 
8783
 
8621
		if (old->release_fb) {
8784
		if (old->release_fb) {
8622
			drm_framebuffer_unregister_private(old->release_fb);
8785
			drm_framebuffer_unregister_private(old->release_fb);
8623
			drm_framebuffer_unreference(old->release_fb);
8786
			drm_framebuffer_unreference(old->release_fb);
8624
		}
8787
		}
8625
 
8788
 
8626
		return;
8789
		return;
8627
	}
8790
	}
8628
 
8791
 
8629
	/* Switch crtc and encoder back off if necessary */
8792
	/* Switch crtc and encoder back off if necessary */
8630
	if (old->dpms_mode != DRM_MODE_DPMS_ON)
8793
	if (old->dpms_mode != DRM_MODE_DPMS_ON)
8631
		connector->funcs->dpms(connector, old->dpms_mode);
8794
		connector->funcs->dpms(connector, old->dpms_mode);
8632
}
8795
}
8633
 
8796
 
8634
static int i9xx_pll_refclk(struct drm_device *dev,
8797
static int i9xx_pll_refclk(struct drm_device *dev,
8635
			   const struct intel_crtc_config *pipe_config)
8798
			   const struct intel_crtc_config *pipe_config)
8636
{
8799
{
8637
	struct drm_i915_private *dev_priv = dev->dev_private;
8800
	struct drm_i915_private *dev_priv = dev->dev_private;
8638
	u32 dpll = pipe_config->dpll_hw_state.dpll;
8801
	u32 dpll = pipe_config->dpll_hw_state.dpll;
8639
 
8802
 
8640
	if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
8803
	if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
8641
		return dev_priv->vbt.lvds_ssc_freq;
8804
		return dev_priv->vbt.lvds_ssc_freq;
8642
	else if (HAS_PCH_SPLIT(dev))
8805
	else if (HAS_PCH_SPLIT(dev))
8643
		return 120000;
8806
		return 120000;
8644
	else if (!IS_GEN2(dev))
8807
	else if (!IS_GEN2(dev))
8645
		return 96000;
8808
		return 96000;
8646
	else
8809
	else
8647
		return 48000;
8810
		return 48000;
8648
}
8811
}
8649
 
8812
 
8650
/* Returns the clock of the currently programmed mode of the given pipe. */
8813
/* Returns the clock of the currently programmed mode of the given pipe. */
8651
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
8814
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
8652
				struct intel_crtc_config *pipe_config)
8815
				struct intel_crtc_config *pipe_config)
8653
{
8816
{
8654
	struct drm_device *dev = crtc->base.dev;
8817
	struct drm_device *dev = crtc->base.dev;
8655
	struct drm_i915_private *dev_priv = dev->dev_private;
8818
	struct drm_i915_private *dev_priv = dev->dev_private;
8656
	int pipe = pipe_config->cpu_transcoder;
8819
	int pipe = pipe_config->cpu_transcoder;
8657
	u32 dpll = pipe_config->dpll_hw_state.dpll;
8820
	u32 dpll = pipe_config->dpll_hw_state.dpll;
8658
	u32 fp;
8821
	u32 fp;
8659
	intel_clock_t clock;
8822
	intel_clock_t clock;
8660
	int refclk = i9xx_pll_refclk(dev, pipe_config);
8823
	int refclk = i9xx_pll_refclk(dev, pipe_config);
8661
 
8824
 
8662
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
8825
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
8663
		fp = pipe_config->dpll_hw_state.fp0;
8826
		fp = pipe_config->dpll_hw_state.fp0;
8664
	else
8827
	else
8665
		fp = pipe_config->dpll_hw_state.fp1;
8828
		fp = pipe_config->dpll_hw_state.fp1;
8666
 
8829
 
8667
	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
8830
	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
8668
	if (IS_PINEVIEW(dev)) {
8831
	if (IS_PINEVIEW(dev)) {
8669
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
8832
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
8670
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
8833
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
8671
	} else {
8834
	} else {
8672
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
8835
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
8673
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
8836
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
8674
	}
8837
	}
8675
 
8838
 
8676
	if (!IS_GEN2(dev)) {
8839
	if (!IS_GEN2(dev)) {
8677
		if (IS_PINEVIEW(dev))
8840
		if (IS_PINEVIEW(dev))
8678
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
8841
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
8679
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
8842
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
8680
		else
8843
		else
8681
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
8844
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
8682
			       DPLL_FPA01_P1_POST_DIV_SHIFT);
8845
			       DPLL_FPA01_P1_POST_DIV_SHIFT);
8683
 
8846
 
8684
		switch (dpll & DPLL_MODE_MASK) {
8847
		switch (dpll & DPLL_MODE_MASK) {
8685
		case DPLLB_MODE_DAC_SERIAL:
8848
		case DPLLB_MODE_DAC_SERIAL:
8686
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
8849
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
8687
				5 : 10;
8850
				5 : 10;
8688
			break;
8851
			break;
8689
		case DPLLB_MODE_LVDS:
8852
		case DPLLB_MODE_LVDS:
8690
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
8853
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
8691
				7 : 14;
8854
				7 : 14;
8692
			break;
8855
			break;
8693
		default:
8856
		default:
8694
			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
8857
			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
8695
				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
8858
				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
8696
			return;
8859
			return;
8697
		}
8860
		}
8698
 
8861
 
8699
		if (IS_PINEVIEW(dev))
8862
		if (IS_PINEVIEW(dev))
8700
			pineview_clock(refclk, &clock);
8863
			pineview_clock(refclk, &clock);
8701
		else
8864
		else
8702
			i9xx_clock(refclk, &clock);
8865
			i9xx_clock(refclk, &clock);
8703
	} else {
8866
	} else {
8704
		u32 lvds = IS_I830(dev) ? 0 : I915_READ(LVDS);
8867
		u32 lvds = IS_I830(dev) ? 0 : I915_READ(LVDS);
8705
		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
8868
		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
8706
 
8869
 
8707
		if (is_lvds) {
8870
		if (is_lvds) {
8708
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
8871
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
8709
				       DPLL_FPA01_P1_POST_DIV_SHIFT);
8872
				       DPLL_FPA01_P1_POST_DIV_SHIFT);
8710
 
8873
 
8711
			if (lvds & LVDS_CLKB_POWER_UP)
8874
			if (lvds & LVDS_CLKB_POWER_UP)
8712
				clock.p2 = 7;
8875
				clock.p2 = 7;
8713
			else
8876
			else
8714
				clock.p2 = 14;
8877
				clock.p2 = 14;
8715
		} else {
8878
		} else {
8716
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
8879
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
8717
				clock.p1 = 2;
8880
				clock.p1 = 2;
8718
			else {
8881
			else {
8719
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
8882
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
8720
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
8883
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
8721
			}
8884
			}
8722
			if (dpll & PLL_P2_DIVIDE_BY_4)
8885
			if (dpll & PLL_P2_DIVIDE_BY_4)
8723
				clock.p2 = 4;
8886
				clock.p2 = 4;
8724
			else
8887
			else
8725
				clock.p2 = 2;
8888
				clock.p2 = 2;
8726
		}
8889
		}
8727
 
8890
 
8728
		i9xx_clock(refclk, &clock);
8891
		i9xx_clock(refclk, &clock);
8729
	}
8892
	}
8730
 
8893
 
8731
	/*
8894
	/*
8732
	 * This value includes pixel_multiplier. We will use
8895
	 * This value includes pixel_multiplier. We will use
8733
	 * port_clock to compute adjusted_mode.crtc_clock in the
8896
	 * port_clock to compute adjusted_mode.crtc_clock in the
8734
	 * encoder's get_config() function.
8897
	 * encoder's get_config() function.
8735
	 */
8898
	 */
8736
	pipe_config->port_clock = clock.dot;
8899
	pipe_config->port_clock = clock.dot;
8737
}
8900
}
8738
 
8901
 
8739
int intel_dotclock_calculate(int link_freq,
8902
int intel_dotclock_calculate(int link_freq,
8740
			     const struct intel_link_m_n *m_n)
8903
			     const struct intel_link_m_n *m_n)
8741
{
8904
{
8742
	/*
8905
	/*
8743
	 * The calculation for the data clock is:
8906
	 * The calculation for the data clock is:
8744
	 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
8907
	 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
8745
	 * But we want to avoid losing precision if possible, so:
8908
	 * But we want to avoid losing precision if possible, so:
8746
	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
8909
	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
8747
	 *
8910
	 *
8748
	 * and the link clock is simpler:
8911
	 * and the link clock is simpler:
8749
	 * link_clock = (m * link_clock) / n
8912
	 * link_clock = (m * link_clock) / n
8750
	 */
8913
	 */
8751
 
8914
 
8752
	if (!m_n->link_n)
8915
	if (!m_n->link_n)
8753
		return 0;
8916
		return 0;
8754
 
8917
 
8755
	return div_u64((u64)m_n->link_m * link_freq, m_n->link_n);
8918
	return div_u64((u64)m_n->link_m * link_freq, m_n->link_n);
8756
}
8919
}
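intel_dotclock_calculate() above recovers the pixel clock from the link M/N values as (link_m * link_freq) / link_n, done in 64-bit arithmetic so the multiplication cannot overflow. A small user-space sketch of the same computation, using made-up link_m/link_n values and a 270000 kHz link clock purely as an example:

#include <stdio.h>
#include <stdint.h>

/* Same formula as the div_u64() above: (m * link_freq) / n in 64 bits. */
static int dotclock_khz(uint32_t link_m, uint32_t link_n, int link_freq_khz)
{
	if (!link_n)
		return 0;
	return (int)(((uint64_t)link_m * link_freq_khz) / link_n);
}

int main(void)
{
	/* Example values only: link_m=107163, link_n=524288, 270 MHz link. */
	printf("%d kHz\n", dotclock_khz(107163, 524288, 270000)); /* ~55187 */
	return 0;
}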
8757
 
8920
 
8758
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
8921
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
8759
				   struct intel_crtc_config *pipe_config)
8922
				   struct intel_crtc_config *pipe_config)
8760
{
8923
{
8761
	struct drm_device *dev = crtc->base.dev;
8924
	struct drm_device *dev = crtc->base.dev;
8762
 
8925
 
8763
	/* read out port_clock from the DPLL */
8926
	/* read out port_clock from the DPLL */
8764
	i9xx_crtc_clock_get(crtc, pipe_config);
8927
	i9xx_crtc_clock_get(crtc, pipe_config);
8765
 
8928
 
8766
	/*
8929
	/*
8767
	 * This value does not include pixel_multiplier.
8930
	 * This value does not include pixel_multiplier.
8768
	 * We will check that port_clock and adjusted_mode.crtc_clock
8931
	 * We will check that port_clock and adjusted_mode.crtc_clock
8769
	 * agree once we know their relationship in the encoder's
8932
	 * agree once we know their relationship in the encoder's
8770
	 * get_config() function.
8933
	 * get_config() function.
8771
	 */
8934
	 */
8772
	pipe_config->adjusted_mode.crtc_clock =
8935
	pipe_config->adjusted_mode.crtc_clock =
8773
		intel_dotclock_calculate(intel_fdi_link_freq(dev) * 10000,
8936
		intel_dotclock_calculate(intel_fdi_link_freq(dev) * 10000,
8774
					 &pipe_config->fdi_m_n);
8937
					 &pipe_config->fdi_m_n);
8775
}
8938
}
8776
 
8939
 
8777
/** Returns the currently programmed mode of the given pipe. */
8940
/** Returns the currently programmed mode of the given pipe. */
8778
struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
8941
struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
8779
					     struct drm_crtc *crtc)
8942
					     struct drm_crtc *crtc)
8780
{
8943
{
8781
	struct drm_i915_private *dev_priv = dev->dev_private;
8944
	struct drm_i915_private *dev_priv = dev->dev_private;
8782
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8945
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8783
	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
8946
	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
8784
	struct drm_display_mode *mode;
8947
	struct drm_display_mode *mode;
8785
	struct intel_crtc_config pipe_config;
8948
	struct intel_crtc_config pipe_config;
8786
	int htot = I915_READ(HTOTAL(cpu_transcoder));
8949
	int htot = I915_READ(HTOTAL(cpu_transcoder));
8787
	int hsync = I915_READ(HSYNC(cpu_transcoder));
8950
	int hsync = I915_READ(HSYNC(cpu_transcoder));
8788
	int vtot = I915_READ(VTOTAL(cpu_transcoder));
8951
	int vtot = I915_READ(VTOTAL(cpu_transcoder));
8789
	int vsync = I915_READ(VSYNC(cpu_transcoder));
8952
	int vsync = I915_READ(VSYNC(cpu_transcoder));
8790
	enum pipe pipe = intel_crtc->pipe;
8953
	enum pipe pipe = intel_crtc->pipe;
8791
 
8954
 
8792
	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
8955
	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
8793
	if (!mode)
8956
	if (!mode)
8794
		return NULL;
8957
		return NULL;
8795
 
8958
 
8796
	/*
8959
	/*
8797
	 * Construct a pipe_config sufficient for getting the clock info
8960
	 * Construct a pipe_config sufficient for getting the clock info
8798
	 * back out of crtc_clock_get.
8961
	 * back out of crtc_clock_get.
8799
	 *
8962
	 *
8800
	 * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
8963
	 * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
8801
	 * to use a real value here instead.
8964
	 * to use a real value here instead.
8802
	 */
8965
	 */
8803
	pipe_config.cpu_transcoder = (enum transcoder) pipe;
8966
	pipe_config.cpu_transcoder = (enum transcoder) pipe;
8804
	pipe_config.pixel_multiplier = 1;
8967
	pipe_config.pixel_multiplier = 1;
8805
	pipe_config.dpll_hw_state.dpll = I915_READ(DPLL(pipe));
8968
	pipe_config.dpll_hw_state.dpll = I915_READ(DPLL(pipe));
8806
	pipe_config.dpll_hw_state.fp0 = I915_READ(FP0(pipe));
8969
	pipe_config.dpll_hw_state.fp0 = I915_READ(FP0(pipe));
8807
	pipe_config.dpll_hw_state.fp1 = I915_READ(FP1(pipe));
8970
	pipe_config.dpll_hw_state.fp1 = I915_READ(FP1(pipe));
8808
	i9xx_crtc_clock_get(intel_crtc, &pipe_config);
8971
	i9xx_crtc_clock_get(intel_crtc, &pipe_config);
8809
 
8972
 
8810
	mode->clock = pipe_config.port_clock / pipe_config.pixel_multiplier;
8973
	mode->clock = pipe_config.port_clock / pipe_config.pixel_multiplier;
8811
	mode->hdisplay = (htot & 0xffff) + 1;
8974
	mode->hdisplay = (htot & 0xffff) + 1;
8812
	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
8975
	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
8813
	mode->hsync_start = (hsync & 0xffff) + 1;
8976
	mode->hsync_start = (hsync & 0xffff) + 1;
8814
	mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
8977
	mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
8815
	mode->vdisplay = (vtot & 0xffff) + 1;
8978
	mode->vdisplay = (vtot & 0xffff) + 1;
8816
	mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
8979
	mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
8817
	mode->vsync_start = (vsync & 0xffff) + 1;
8980
	mode->vsync_start = (vsync & 0xffff) + 1;
8818
	mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
8981
	mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
8819
 
8982
 
8820
	drm_mode_set_name(mode);
8983
	drm_mode_set_name(mode);
8821
 
8984
 
8822
	return mode;
8985
	return mode;
8823
}
8986
}
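intel_crtc_mode_get() above rebuilds a drm_display_mode from the transcoder timing registers, which pack "active - 1" in the low 16 bits and "total - 1" in the high 16 bits (HSYNC/VSYNC use the same start/end packing). A quick sketch of that unpacking, using a made-up HTOTAL value for a 1280-wide, 1688-total mode:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t htot = 0x069704ff;   /* hypothetical register value, not read from hardware */
	int hdisplay = (htot & 0xffff) + 1;              /* 0x0500 -> 1280 */
	int htotal   = ((htot & 0xffff0000) >> 16) + 1;  /* 0x0698 -> 1688 */

	printf("hdisplay=%d htotal=%d\n", hdisplay, htotal);
	return 0;
}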
8824
 
-
 
8825
static void intel_increase_pllclock(struct drm_device *dev,
-
 
8826
				    enum pipe pipe)
-
 
8827
{
-
 
8828
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
8829
	int dpll_reg = DPLL(pipe);
-
 
8830
	int dpll;
-
 
8831
 
-
 
8832
	if (!HAS_GMCH_DISPLAY(dev))
-
 
8833
		return;
-
 
8834
 
-
 
8835
	if (!dev_priv->lvds_downclock_avail)
-
 
8836
		return;
-
 
8837
 
-
 
8838
	dpll = I915_READ(dpll_reg);
-
 
8839
	if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
-
 
8840
		DRM_DEBUG_DRIVER("upclocking LVDS\n");
-
 
8841
 
-
 
8842
		assert_panel_unlocked(dev_priv, pipe);
-
 
8843
 
-
 
8844
		dpll &= ~DISPLAY_RATE_SELECT_FPA1;
-
 
8845
		I915_WRITE(dpll_reg, dpll);
-
 
8846
		intel_wait_for_vblank(dev, pipe);
-
 
8847
 
-
 
8848
		dpll = I915_READ(dpll_reg);
-
 
8849
		if (dpll & DISPLAY_RATE_SELECT_FPA1)
-
 
8850
			DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
-
 
8851
	}
-
 
8852
}
-
 
8853
 
8987
 
8854
static void intel_decrease_pllclock(struct drm_crtc *crtc)
8988
static void intel_decrease_pllclock(struct drm_crtc *crtc)
8855
{
8989
{
8856
	struct drm_device *dev = crtc->dev;
8990
	struct drm_device *dev = crtc->dev;
8857
	struct drm_i915_private *dev_priv = dev->dev_private;
8991
	struct drm_i915_private *dev_priv = dev->dev_private;
8858
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8992
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8859
 
8993
 
8860
	if (!HAS_GMCH_DISPLAY(dev))
8994
	if (!HAS_GMCH_DISPLAY(dev))
8861
		return;
8995
		return;
8862
 
8996
 
8863
	if (!dev_priv->lvds_downclock_avail)
8997
	if (!dev_priv->lvds_downclock_avail)
8864
		return;
8998
		return;
8865
 
8999
 
8866
	/*
9000
	/*
8867
	 * Since this is called by a timer, we should never get here in
9001
	 * Since this is called by a timer, we should never get here in
8868
	 * the manual case.
9002
	 * the manual case.
8869
	 */
9003
	 */
8870
	if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
9004
	if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
8871
		int pipe = intel_crtc->pipe;
9005
		int pipe = intel_crtc->pipe;
8872
		int dpll_reg = DPLL(pipe);
9006
		int dpll_reg = DPLL(pipe);
8873
		int dpll;
9007
		int dpll;
8874
 
9008
 
8875
		DRM_DEBUG_DRIVER("downclocking LVDS\n");
9009
		DRM_DEBUG_DRIVER("downclocking LVDS\n");
8876
 
9010
 
8877
		assert_panel_unlocked(dev_priv, pipe);
9011
		assert_panel_unlocked(dev_priv, pipe);
8878
 
9012
 
8879
		dpll = I915_READ(dpll_reg);
9013
		dpll = I915_READ(dpll_reg);
8880
		dpll |= DISPLAY_RATE_SELECT_FPA1;
9014
		dpll |= DISPLAY_RATE_SELECT_FPA1;
8881
		I915_WRITE(dpll_reg, dpll);
9015
		I915_WRITE(dpll_reg, dpll);
8882
		intel_wait_for_vblank(dev, pipe);
9016
		intel_wait_for_vblank(dev, pipe);
8883
		dpll = I915_READ(dpll_reg);
9017
		dpll = I915_READ(dpll_reg);
8884
		if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
9018
		if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
8885
			DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
9019
			DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
8886
	}
9020
	}
8887
 
9021
 
8888
}
9022
}
8889
 
9023
 
8890
void intel_mark_busy(struct drm_device *dev)
9024
void intel_mark_busy(struct drm_device *dev)
8891
{
9025
{
8892
	struct drm_i915_private *dev_priv = dev->dev_private;
9026
	struct drm_i915_private *dev_priv = dev->dev_private;
8893
 
9027
 
8894
	if (dev_priv->mm.busy)
9028
	if (dev_priv->mm.busy)
8895
		return;
9029
		return;
8896
 
9030
 
8897
	intel_runtime_pm_get(dev_priv);
9031
	intel_runtime_pm_get(dev_priv);
8898
	i915_update_gfx_val(dev_priv);
9032
	i915_update_gfx_val(dev_priv);
8899
	dev_priv->mm.busy = true;
9033
	dev_priv->mm.busy = true;
8900
}
9034
}
8901
 
9035
 
8902
void intel_mark_idle(struct drm_device *dev)
9036
void intel_mark_idle(struct drm_device *dev)
8903
{
9037
{
8904
	struct drm_i915_private *dev_priv = dev->dev_private;
9038
	struct drm_i915_private *dev_priv = dev->dev_private;
8905
	struct drm_crtc *crtc;
9039
	struct drm_crtc *crtc;
8906
 
9040
 
8907
	if (!dev_priv->mm.busy)
9041
	if (!dev_priv->mm.busy)
8908
		return;
9042
		return;
8909
 
9043
 
8910
	dev_priv->mm.busy = false;
9044
	dev_priv->mm.busy = false;
8911
 
9045
 
8912
	if (!i915.powersave)
9046
	if (!i915.powersave)
8913
		goto out;
9047
		goto out;
8914
 
9048
 
8915
	for_each_crtc(dev, crtc) {
9049
	for_each_crtc(dev, crtc) {
8916
		if (!crtc->primary->fb)
9050
		if (!crtc->primary->fb)
8917
			continue;
9051
			continue;
8918
 
9052
 
8919
		intel_decrease_pllclock(crtc);
9053
		intel_decrease_pllclock(crtc);
8920
	}
9054
	}
8921
 
9055
 
8922
	if (INTEL_INFO(dev)->gen >= 6)
9056
	if (INTEL_INFO(dev)->gen >= 6)
8923
		gen6_rps_idle(dev->dev_private);
9057
		gen6_rps_idle(dev->dev_private);
8924
 
9058
 
8925
out:
9059
out:
8926
	intel_runtime_pm_put(dev_priv);
9060
	intel_runtime_pm_put(dev_priv);
8927
}
9061
}
8928
 
-
 
8929
 
-
 
8930
/**
-
 
8931
 * intel_mark_fb_busy - mark given planes as busy
-
 
8932
 * @dev: DRM device
-
 
8933
 * @frontbuffer_bits: bits for the affected planes
-
 
8934
 * @ring: optional ring for asynchronous commands
-
 
8935
 *
-
 
8936
 * This function gets called every time the screen contents change. It can be
-
 
8937
 * used to keep e.g. the update rate at the nominal refresh rate with DRRS.
-
 
8938
 */
-
 
8939
static void intel_mark_fb_busy(struct drm_device *dev,
-
 
8940
			       unsigned frontbuffer_bits,
-
 
8941
			struct intel_engine_cs *ring)
-
 
8942
{
-
 
8943
	enum pipe pipe;
-
 
8944
 
-
 
8945
	if (!i915.powersave)
-
 
8946
		return;
-
 
8947
 
-
 
8948
	for_each_pipe(pipe) {
-
 
8949
		if (!(frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)))
-
 
8950
			continue;
-
 
8951
 
-
 
8952
		intel_increase_pllclock(dev, pipe);
-
 
8953
		if (ring && intel_fbc_enabled(dev))
-
 
8954
			ring->fbc_dirty = true;
-
 
8955
	}
-
 
8956
}
-
 
8957
 
-
 
8958
/**
-
 
8959
 * intel_fb_obj_invalidate - invalidate frontbuffer object
-
 
8960
 * @obj: GEM object to invalidate
-
 
8961
 * @ring: set for asynchronous rendering
-
 
8962
 *
-
 
8963
 * This function gets called every time rendering on the given object starts and
-
 
8964
 * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
-
 
8965
 * be invalidated. If @ring is non-NULL any subsequent invalidation will be delayed
-
 
8966
 * until the rendering completes or a flip on this frontbuffer plane is
-
 
8967
 * scheduled.
-
 
8968
 */
-
 
8969
void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
-
 
8970
			     struct intel_engine_cs *ring)
-
 
8971
{
-
 
8972
	struct drm_device *dev = obj->base.dev;
-
 
8973
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
8974
 
-
 
8975
	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
 
8976
 
-
 
8977
	if (!obj->frontbuffer_bits)
-
 
8978
		return;
-
 
8979
 
-
 
8980
	if (ring) {
-
 
8981
		mutex_lock(&dev_priv->fb_tracking.lock);
-
 
8982
		dev_priv->fb_tracking.busy_bits
-
 
8983
			|= obj->frontbuffer_bits;
-
 
8984
		dev_priv->fb_tracking.flip_bits
-
 
8985
			&= ~obj->frontbuffer_bits;
-
 
8986
		mutex_unlock(&dev_priv->fb_tracking.lock);
-
 
8987
	}
-
 
8988
 
-
 
8989
	intel_mark_fb_busy(dev, obj->frontbuffer_bits, ring);
-
 
8990
 
-
 
8991
	intel_edp_psr_invalidate(dev, obj->frontbuffer_bits);
-
 
8992
}
-
 
8993
 
-
 
8994
/**
-
 
8995
 * intel_frontbuffer_flush - flush frontbuffer
-
 
8996
 * @dev: DRM device
-
 
8997
 * @frontbuffer_bits: frontbuffer plane tracking bits
-
 
8998
 *
-
 
8999
 * This function gets called every time rendering on the given planes has
-
 
9000
 * completed and frontbuffer caching can be started again. Flushes will get
-
 
9001
 * delayed if they're blocked by some outstanding asynchronous rendering.
-
 
9002
 *
-
 
9003
 * Can be called without any locks held.
-
 
9004
 */
-
 
9005
void intel_frontbuffer_flush(struct drm_device *dev,
-
 
9006
			     unsigned frontbuffer_bits)
-
 
9007
{
-
 
9008
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
9009
 
-
 
9010
	/* Delay flushing when rings are still busy. */
-
 
9011
	mutex_lock(&dev_priv->fb_tracking.lock);
-
 
9012
	frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits;
-
 
9013
	mutex_unlock(&dev_priv->fb_tracking.lock);
-
 
9014
 
-
 
9015
	intel_mark_fb_busy(dev, frontbuffer_bits, NULL);
-
 
9016
 
-
 
9017
	intel_edp_psr_flush(dev, frontbuffer_bits);
-
 
9018
}
9062
 
9019
static void intel_crtc_destroy(struct drm_crtc *crtc)
9063
static void intel_crtc_destroy(struct drm_crtc *crtc)
9020
{
9064
{
9021
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9065
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9022
	struct drm_device *dev = crtc->dev;
9066
	struct drm_device *dev = crtc->dev;
9023
	struct intel_unpin_work *work;
9067
	struct intel_unpin_work *work;
9024
	unsigned long flags;
-
 
9025
 
9068
 
9026
	spin_lock_irqsave(&dev->event_lock, flags);
9069
	spin_lock_irq(&dev->event_lock);
9027
	work = intel_crtc->unpin_work;
9070
	work = intel_crtc->unpin_work;
9028
	intel_crtc->unpin_work = NULL;
9071
	intel_crtc->unpin_work = NULL;
9029
	spin_unlock_irqrestore(&dev->event_lock, flags);
9072
	spin_unlock_irq(&dev->event_lock);
9030
 
9073
 
9031
	if (work) {
9074
	if (work) {
9032
		cancel_work_sync(&work->work);
9075
		cancel_work_sync(&work->work);
9033
		kfree(work);
9076
		kfree(work);
9034
	}
9077
	}
9035
 
9078
 
9036
	drm_crtc_cleanup(crtc);
9079
	drm_crtc_cleanup(crtc);
9037
 
9080
 
9038
	kfree(intel_crtc);
9081
	kfree(intel_crtc);
9039
}
9082
}
9040
 
9083
 
9041
#if 0
9084
#if 0
9042
static void intel_unpin_work_fn(struct work_struct *__work)
9085
static void intel_unpin_work_fn(struct work_struct *__work)
9043
{
9086
{
9044
	struct intel_unpin_work *work =
9087
	struct intel_unpin_work *work =
9045
		container_of(__work, struct intel_unpin_work, work);
9088
		container_of(__work, struct intel_unpin_work, work);
9046
	struct drm_device *dev = work->crtc->dev;
9089
	struct drm_device *dev = work->crtc->dev;
9047
	enum pipe pipe = to_intel_crtc(work->crtc)->pipe;
9090
	enum pipe pipe = to_intel_crtc(work->crtc)->pipe;
9048
 
9091
 
9049
	mutex_lock(&dev->struct_mutex);
9092
	mutex_lock(&dev->struct_mutex);
9050
	intel_unpin_fb_obj(work->old_fb_obj);
9093
	intel_unpin_fb_obj(work->old_fb_obj);
9051
	drm_gem_object_unreference(&work->pending_flip_obj->base);
9094
	drm_gem_object_unreference(&work->pending_flip_obj->base);
9052
	drm_gem_object_unreference(&work->old_fb_obj->base);
9095
	drm_gem_object_unreference(&work->old_fb_obj->base);
9053
 
9096
 
9054
	intel_update_fbc(dev);
9097
	intel_update_fbc(dev);
9055
	mutex_unlock(&dev->struct_mutex);
9098
	mutex_unlock(&dev->struct_mutex);
-
 
9099
 
-
 
9100
	intel_frontbuffer_flip_complete(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
9056
 
9101
 
9057
	BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0);
9102
	BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0);
9058
	atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count);
9103
	atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count);
9059
 
9104
 
9060
	kfree(work);
9105
	kfree(work);
9061
}
9106
}
9062
 
9107
 
9063
static void do_intel_finish_page_flip(struct drm_device *dev,
9108
static void do_intel_finish_page_flip(struct drm_device *dev,
9064
				      struct drm_crtc *crtc)
9109
				      struct drm_crtc *crtc)
9065
{
9110
{
9066
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
9067
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9111
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9068
	struct intel_unpin_work *work;
9112
	struct intel_unpin_work *work;
9069
	unsigned long flags;
9113
	unsigned long flags;
9070
 
9114
 
9071
	/* Ignore early vblank irqs */
9115
	/* Ignore early vblank irqs */
9072
	if (intel_crtc == NULL)
9116
	if (intel_crtc == NULL)
9073
		return;
9117
		return;
-
 
9118
 
-
 
9119
	/*
-
 
9120
	 * This is called both by irq handlers and the reset code (to complete
-
 
9121
	 * lost pageflips) so needs the full irqsave spinlocks.
9074
 
9122
	 */
9075
	spin_lock_irqsave(&dev->event_lock, flags);
9123
	spin_lock_irqsave(&dev->event_lock, flags);
9076
	work = intel_crtc->unpin_work;
9124
	work = intel_crtc->unpin_work;
9077
 
9125
 
9078
	/* Ensure we don't miss a work->pending update ... */
9126
	/* Ensure we don't miss a work->pending update ... */
9079
	smp_rmb();
9127
	smp_rmb();
9080
 
9128
 
9081
	if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
9129
	if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
9082
		spin_unlock_irqrestore(&dev->event_lock, flags);
9130
		spin_unlock_irqrestore(&dev->event_lock, flags);
9083
		return;
9131
		return;
9084
	}
9132
	}
9085
 
-
 
9086
	/* and that the unpin work is consistent wrt ->pending. */
-
 
9087
	smp_rmb();
-
 
9088
 
-
 
9089
	intel_crtc->unpin_work = NULL;
-
 
9090
 
-
 
9091
	if (work->event)
-
 
9092
		drm_send_vblank_event(dev, intel_crtc->pipe, work->event);
-
 
9093
 
9133
 
9094
	drm_crtc_vblank_put(crtc);
9134
	page_flip_completed(intel_crtc);
9095
 
-
 
9096
	spin_unlock_irqrestore(&dev->event_lock, flags);
-
 
9097
 
-
 
9098
	wake_up_all(&dev_priv->pending_flip_queue);
-
 
9099
 
-
 
9100
	queue_work(dev_priv->wq, &work->work);
-
 
9101
 
9135
 
9102
	trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
9136
	spin_unlock_irqrestore(&dev->event_lock, flags);
9103
}
9137
}
9104
 
9138
 
9105
void intel_finish_page_flip(struct drm_device *dev, int pipe)
9139
void intel_finish_page_flip(struct drm_device *dev, int pipe)
9106
{
9140
{
9107
	struct drm_i915_private *dev_priv = dev->dev_private;
9141
	struct drm_i915_private *dev_priv = dev->dev_private;
9108
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
9142
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
9109
 
9143
 
9110
	do_intel_finish_page_flip(dev, crtc);
9144
	do_intel_finish_page_flip(dev, crtc);
9111
}
9145
}
9112
 
9146
 
9113
void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
9147
void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
9114
{
9148
{
9115
	struct drm_i915_private *dev_priv = dev->dev_private;
9149
	struct drm_i915_private *dev_priv = dev->dev_private;
9116
	struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
9150
	struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
9117
 
9151
 
9118
	do_intel_finish_page_flip(dev, crtc);
9152
	do_intel_finish_page_flip(dev, crtc);
9119
}
9153
}
9120
 
9154
 
9121
/* Is 'a' after or equal to 'b'? */
9155
/* Is 'a' after or equal to 'b'? */
9122
static bool g4x_flip_count_after_eq(u32 a, u32 b)
9156
static bool g4x_flip_count_after_eq(u32 a, u32 b)
9123
{
9157
{
9124
	return !((a - b) & 0x80000000);
9158
	return !((a - b) & 0x80000000);
9125
}
9159
}
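g4x_flip_count_after_eq() above is a wrap-safe ">=" on the 32-bit hardware flip counter: the unsigned difference a - b is interpreted as signed, so "a is at or after b" holds as long as the two counts are less than 2^31 apart. A short sketch with arbitrary example values, including one pair straddling the 32-bit wrap:

#include <stdio.h>
#include <stdint.h>

/* Same test as above: check the sign bit of the unsigned difference. */
static int flip_count_after_eq(uint32_t a, uint32_t b)
{
	return !((a - b) & 0x80000000u);
}

int main(void)
{
	printf("%d\n", flip_count_after_eq(5, 3));           /* 1: 5 >= 3          */
	printf("%d\n", flip_count_after_eq(3, 5));           /* 0: 3 <  5          */
	printf("%d\n", flip_count_after_eq(2, 0xfffffffeu)); /* 1: counter wrapped */
	return 0;
}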
9126
 
9160
 
9127
static bool page_flip_finished(struct intel_crtc *crtc)
9161
static bool page_flip_finished(struct intel_crtc *crtc)
9128
{
9162
{
9129
	struct drm_device *dev = crtc->base.dev;
9163
	struct drm_device *dev = crtc->base.dev;
9130
	struct drm_i915_private *dev_priv = dev->dev_private;
9164
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
9165
 
-
 
9166
	if (i915_reset_in_progress(&dev_priv->gpu_error) ||
-
 
9167
	    crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
-
 
9168
		return true;
9131
 
9169
 
9132
	/*
9170
	/*
9133
	 * The relevant registers don't exist on pre-ctg.
9171
	 * The relevant registers don't exist on pre-ctg.
9134
	 * As the flip done interrupt doesn't trigger for mmio
9172
	 * As the flip done interrupt doesn't trigger for mmio
9135
	 * flips on gmch platforms, a flip count check isn't
9173
	 * flips on gmch platforms, a flip count check isn't
9136
	 * really needed there. But since ctg has the registers,
9174
	 * really needed there. But since ctg has the registers,
9137
	 * include it in the check anyway.
9175
	 * include it in the check anyway.
9138
	 */
9176
	 */
9139
	if (INTEL_INFO(dev)->gen < 5 && !IS_G4X(dev))
9177
	if (INTEL_INFO(dev)->gen < 5 && !IS_G4X(dev))
9140
		return true;
9178
		return true;
9141
 
9179
 
9142
	/*
9180
	/*
9143
	 * A DSPSURFLIVE check isn't enough in case the mmio and CS flips
9181
	 * A DSPSURFLIVE check isn't enough in case the mmio and CS flips
9144
	 * used the same base address. In that case the mmio flip might
9182
	 * used the same base address. In that case the mmio flip might
9145
	 * have completed, but the CS hasn't even executed the flip yet.
9183
	 * have completed, but the CS hasn't even executed the flip yet.
9146
	 *
9184
	 *
9147
	 * A flip count check isn't enough as the CS might have updated
9185
	 * A flip count check isn't enough as the CS might have updated
9148
	 * the base address just after start of vblank, but before we
9186
	 * the base address just after start of vblank, but before we
9149
	 * managed to process the interrupt. This means we'd complete the
9187
	 * managed to process the interrupt. This means we'd complete the
9150
	 * CS flip too soon.
9188
	 * CS flip too soon.
9151
	 *
9189
	 *
9152
	 * Combining both checks should get us a good enough result. It may
9190
	 * Combining both checks should get us a good enough result. It may
9153
	 * still happen that the CS flip has been executed, but has not
9191
	 * still happen that the CS flip has been executed, but has not
9154
	 * yet actually completed. But in case the base address is the same
9192
	 * yet actually completed. But in case the base address is the same
9155
	 * anyway, we don't really care.
9193
	 * anyway, we don't really care.
9156
	 */
9194
	 */
9157
	return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) ==
9195
	return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) ==
9158
		crtc->unpin_work->gtt_offset &&
9196
		crtc->unpin_work->gtt_offset &&
9159
		g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_GM45(crtc->pipe)),
9197
		g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_GM45(crtc->pipe)),
9160
				    crtc->unpin_work->flip_count);
9198
				    crtc->unpin_work->flip_count);
9161
}
9199
}
9162
 
9200
 
9163
void intel_prepare_page_flip(struct drm_device *dev, int plane)
9201
void intel_prepare_page_flip(struct drm_device *dev, int plane)
9164
{
9202
{
9165
	struct drm_i915_private *dev_priv = dev->dev_private;
9203
	struct drm_i915_private *dev_priv = dev->dev_private;
9166
	struct intel_crtc *intel_crtc =
9204
	struct intel_crtc *intel_crtc =
9167
		to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
9205
		to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
9168
	unsigned long flags;
9206
	unsigned long flags;
-
 
9207
 
-
 
9208
 
-
 
9209
	/*
-
 
9210
	 * This is called both by irq handlers and the reset code (to complete
-
 
9211
	 * lost pageflips) so needs the full irqsave spinlocks.
9169
 
9212
	 *
9170
	/* NB: An MMIO update of the plane base pointer will also
9213
	 * NB: An MMIO update of the plane base pointer will also
9171
	 * generate a page-flip completion irq, i.e. every modeset
9214
	 * generate a page-flip completion irq, i.e. every modeset
9172
	 * is also accompanied by a spurious intel_prepare_page_flip().
9215
	 * is also accompanied by a spurious intel_prepare_page_flip().
9173
	 */
9216
	 */
9174
	spin_lock_irqsave(&dev->event_lock, flags);
9217
	spin_lock_irqsave(&dev->event_lock, flags);
9175
	if (intel_crtc->unpin_work && page_flip_finished(intel_crtc))
9218
	if (intel_crtc->unpin_work && page_flip_finished(intel_crtc))
9176
		atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
9219
		atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
9177
	spin_unlock_irqrestore(&dev->event_lock, flags);
9220
	spin_unlock_irqrestore(&dev->event_lock, flags);
9178
}
9221
}
9179
 
9222
 
9180
static inline void intel_mark_page_flip_active(struct intel_crtc *intel_crtc)
9223
static inline void intel_mark_page_flip_active(struct intel_crtc *intel_crtc)
9181
{
9224
{
9182
	/* Ensure that the work item is consistent when activating it ... */
9225
	/* Ensure that the work item is consistent when activating it ... */
9183
	smp_wmb();
9226
	smp_wmb();
9184
	atomic_set(&intel_crtc->unpin_work->pending, INTEL_FLIP_PENDING);
9227
	atomic_set(&intel_crtc->unpin_work->pending, INTEL_FLIP_PENDING);
9185
	/* and that it is marked active as soon as the irq could fire. */
9228
	/* and that it is marked active as soon as the irq could fire. */
9186
	smp_wmb();
9229
	smp_wmb();
9187
}
9230
}
9188
 
9231
 
9189
static int intel_gen2_queue_flip(struct drm_device *dev,
9232
static int intel_gen2_queue_flip(struct drm_device *dev,
9190
				 struct drm_crtc *crtc,
9233
				 struct drm_crtc *crtc,
9191
				 struct drm_framebuffer *fb,
9234
				 struct drm_framebuffer *fb,
9192
				 struct drm_i915_gem_object *obj,
9235
				 struct drm_i915_gem_object *obj,
9193
				 struct intel_engine_cs *ring,
9236
				 struct intel_engine_cs *ring,
9194
				 uint32_t flags)
9237
				 uint32_t flags)
9195
{
9238
{
9196
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9239
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9197
	u32 flip_mask;
9240
	u32 flip_mask;
9198
	int ret;
9241
	int ret;
9199
 
9242
 
9200
	ret = intel_ring_begin(ring, 6);
9243
	ret = intel_ring_begin(ring, 6);
9201
	if (ret)
9244
	if (ret)
9202
		return ret;
9245
		return ret;
9203
 
9246
 
9204
	/* Can't queue multiple flips, so wait for the previous
9247
	/* Can't queue multiple flips, so wait for the previous
9205
	 * one to finish before executing the next.
9248
	 * one to finish before executing the next.
9206
	 */
9249
	 */
9207
	if (intel_crtc->plane)
9250
	if (intel_crtc->plane)
9208
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
9251
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
9209
	else
9252
	else
9210
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
9253
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
9211
	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
9254
	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
9212
	intel_ring_emit(ring, MI_NOOP);
9255
	intel_ring_emit(ring, MI_NOOP);
9213
	intel_ring_emit(ring, MI_DISPLAY_FLIP |
9256
	intel_ring_emit(ring, MI_DISPLAY_FLIP |
9214
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
9257
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
9215
	intel_ring_emit(ring, fb->pitches[0]);
9258
	intel_ring_emit(ring, fb->pitches[0]);
9216
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
9259
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
9217
	intel_ring_emit(ring, 0); /* aux display base address, unused */
9260
	intel_ring_emit(ring, 0); /* aux display base address, unused */
9218
 
9261
 
9219
	intel_mark_page_flip_active(intel_crtc);
9262
	intel_mark_page_flip_active(intel_crtc);
9220
	__intel_ring_advance(ring);
9263
	__intel_ring_advance(ring);
9221
	return 0;
9264
	return 0;
9222
}
9265
}
9223
 
9266
 
9224
static int intel_gen3_queue_flip(struct drm_device *dev,
9267
static int intel_gen3_queue_flip(struct drm_device *dev,
9225
				 struct drm_crtc *crtc,
9268
				 struct drm_crtc *crtc,
9226
				 struct drm_framebuffer *fb,
9269
				 struct drm_framebuffer *fb,
9227
				 struct drm_i915_gem_object *obj,
9270
				 struct drm_i915_gem_object *obj,
9228
				 struct intel_engine_cs *ring,
9271
				 struct intel_engine_cs *ring,
9229
				 uint32_t flags)
9272
				 uint32_t flags)
9230
{
9273
{
9231
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9274
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9232
	u32 flip_mask;
9275
	u32 flip_mask;
9233
	int ret;
9276
	int ret;
9234
 
9277
 
9235
	ret = intel_ring_begin(ring, 6);
9278
	ret = intel_ring_begin(ring, 6);
9236
	if (ret)
9279
	if (ret)
9237
		return ret;
9280
		return ret;
9238
 
9281
 
9239
	if (intel_crtc->plane)
9282
	if (intel_crtc->plane)
9240
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
9283
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
9241
	else
9284
	else
9242
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
9285
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
9243
	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
9286
	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
9244
	intel_ring_emit(ring, MI_NOOP);
9287
	intel_ring_emit(ring, MI_NOOP);
9245
	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
9288
	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
9246
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
9289
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
9247
	intel_ring_emit(ring, fb->pitches[0]);
9290
	intel_ring_emit(ring, fb->pitches[0]);
9248
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
9291
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
9249
	intel_ring_emit(ring, MI_NOOP);
9292
	intel_ring_emit(ring, MI_NOOP);
9250
 
9293
 
9251
	intel_mark_page_flip_active(intel_crtc);
9294
	intel_mark_page_flip_active(intel_crtc);
9252
	__intel_ring_advance(ring);
9295
	__intel_ring_advance(ring);
9253
	return 0;
9296
	return 0;
9254
}
9297
}
9255
 
9298
 
9256
static int intel_gen4_queue_flip(struct drm_device *dev,
9299
static int intel_gen4_queue_flip(struct drm_device *dev,
9257
				 struct drm_crtc *crtc,
9300
				 struct drm_crtc *crtc,
9258
				 struct drm_framebuffer *fb,
9301
				 struct drm_framebuffer *fb,
9259
				 struct drm_i915_gem_object *obj,
9302
				 struct drm_i915_gem_object *obj,
9260
				 struct intel_engine_cs *ring,
9303
				 struct intel_engine_cs *ring,
9261
				 uint32_t flags)
9304
				 uint32_t flags)
9262
{
9305
{
9263
	struct drm_i915_private *dev_priv = dev->dev_private;
9306
	struct drm_i915_private *dev_priv = dev->dev_private;
9264
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9307
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9265
	uint32_t pf, pipesrc;
9308
	uint32_t pf, pipesrc;
9266
	int ret;
9309
	int ret;
9267
 
9310
 
9268
	ret = intel_ring_begin(ring, 4);
9311
	ret = intel_ring_begin(ring, 4);
9269
	if (ret)
9312
	if (ret)
9270
		return ret;
9313
		return ret;
9271
 
9314
 
9272
	/* i965+ uses the linear or tiled offsets from the
9315
	/* i965+ uses the linear or tiled offsets from the
9273
	 * Display Registers (which do not change across a page-flip)
9316
	 * Display Registers (which do not change across a page-flip)
9274
	 * so we need only reprogram the base address.
9317
	 * so we need only reprogram the base address.
9275
	 */
9318
	 */
9276
	intel_ring_emit(ring, MI_DISPLAY_FLIP |
9319
	intel_ring_emit(ring, MI_DISPLAY_FLIP |
9277
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
9320
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
9278
	intel_ring_emit(ring, fb->pitches[0]);
9321
	intel_ring_emit(ring, fb->pitches[0]);
9279
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset |
9322
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset |
9280
			obj->tiling_mode);
9323
			obj->tiling_mode);
9281
 
9324
 
9282
	/* XXX Enabling the panel-fitter across page-flip is so far
9325
	/* XXX Enabling the panel-fitter across page-flip is so far
9283
	 * untested on non-native modes, so ignore it for now.
9326
	 * untested on non-native modes, so ignore it for now.
9284
	 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
9327
	 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
9285
	 */
9328
	 */
9286
	pf = 0;
9329
	pf = 0;
9287
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
9330
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
9288
	intel_ring_emit(ring, pf | pipesrc);
9331
	intel_ring_emit(ring, pf | pipesrc);
9289
 
9332
 
9290
	intel_mark_page_flip_active(intel_crtc);
9333
	intel_mark_page_flip_active(intel_crtc);
9291
	__intel_ring_advance(ring);
9334
	__intel_ring_advance(ring);
9292
	return 0;
9335
	return 0;
9293
}
9336
}
9294
 
9337
 
9295
static int intel_gen6_queue_flip(struct drm_device *dev,
9338
static int intel_gen6_queue_flip(struct drm_device *dev,
9296
				 struct drm_crtc *crtc,
9339
				 struct drm_crtc *crtc,
9297
				 struct drm_framebuffer *fb,
9340
				 struct drm_framebuffer *fb,
9298
				 struct drm_i915_gem_object *obj,
9341
				 struct drm_i915_gem_object *obj,
9299
				 struct intel_engine_cs *ring,
9342
				 struct intel_engine_cs *ring,
9300
				 uint32_t flags)
9343
				 uint32_t flags)
9301
{
9344
{
9302
	struct drm_i915_private *dev_priv = dev->dev_private;
9345
	struct drm_i915_private *dev_priv = dev->dev_private;
9303
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9346
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9304
	uint32_t pf, pipesrc;
9347
	uint32_t pf, pipesrc;
9305
	int ret;
9348
	int ret;
9306
 
9349
 
9307
	ret = intel_ring_begin(ring, 4);
9350
	ret = intel_ring_begin(ring, 4);
9308
	if (ret)
9351
	if (ret)
9309
		return ret;
9352
		return ret;
9310
 
9353
 
9311
	intel_ring_emit(ring, MI_DISPLAY_FLIP |
9354
	intel_ring_emit(ring, MI_DISPLAY_FLIP |
9312
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
9355
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
9313
	intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
9356
	intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
9314
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
9357
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
9315
 
9358
 
9316
	/* Contrary to the suggestions in the documentation,
9359
	/* Contrary to the suggestions in the documentation,
9317
	 * "Enable Panel Fitter" does not seem to be required when page
9360
	 * "Enable Panel Fitter" does not seem to be required when page
9318
	 * flipping with a non-native mode, and, worse, causes a normal
9361
	 * flipping with a non-native mode, and, worse, causes a normal
9319
	 * modeset to fail.
9362
	 * modeset to fail.
9320
	 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
9363
	 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
9321
	 */
9364
	 */
9322
	pf = 0;
9365
	pf = 0;
9323
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
9366
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
9324
	intel_ring_emit(ring, pf | pipesrc);
9367
	intel_ring_emit(ring, pf | pipesrc);
9325
 
9368
 
9326
	intel_mark_page_flip_active(intel_crtc);
9369
	intel_mark_page_flip_active(intel_crtc);
9327
	__intel_ring_advance(ring);
9370
	__intel_ring_advance(ring);
9328
	return 0;
9371
	return 0;
9329
}
9372
}
9330
 
9373
 
9331
static int intel_gen7_queue_flip(struct drm_device *dev,
9374
static int intel_gen7_queue_flip(struct drm_device *dev,
9332
				 struct drm_crtc *crtc,
9375
				 struct drm_crtc *crtc,
9333
				 struct drm_framebuffer *fb,
9376
				 struct drm_framebuffer *fb,
9334
				 struct drm_i915_gem_object *obj,
9377
				 struct drm_i915_gem_object *obj,
9335
				 struct intel_engine_cs *ring,
9378
				 struct intel_engine_cs *ring,
9336
				 uint32_t flags)
9379
				 uint32_t flags)
9337
{
9380
{
9338
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9381
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9339
	uint32_t plane_bit = 0;
9382
	uint32_t plane_bit = 0;
9340
	int len, ret;
9383
	int len, ret;
9341
 
9384
 
9342
	switch (intel_crtc->plane) {
9385
	switch (intel_crtc->plane) {
9343
	case PLANE_A:
9386
	case PLANE_A:
9344
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
9387
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
9345
		break;
9388
		break;
9346
	case PLANE_B:
9389
	case PLANE_B:
9347
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
9390
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
9348
		break;
9391
		break;
9349
	case PLANE_C:
9392
	case PLANE_C:
9350
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
9393
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
9351
		break;
9394
		break;
9352
	default:
9395
	default:
9353
		WARN_ONCE(1, "unknown plane in flip command\n");
9396
		WARN_ONCE(1, "unknown plane in flip command\n");
9354
		return -ENODEV;
9397
		return -ENODEV;
9355
	}
9398
	}
9356
 
9399
 
9357
	len = 4;
9400
	len = 4;
9358
	if (ring->id == RCS) {
9401
	if (ring->id == RCS) {
9359
		len += 6;
9402
		len += 6;
9360
		/*
9403
		/*
9361
		 * On Gen 8, SRM now takes an extra dword to accommodate
9404
		 * On Gen 8, SRM now takes an extra dword to accommodate
9362
		 * 48-bit addresses, and we need a NOOP for the batch size to
9405
		 * 48-bit addresses, and we need a NOOP for the batch size to
9363
		 * stay even.
9406
		 * stay even.
9364
		 */
9407
		 */
9365
		if (IS_GEN8(dev))
9408
		if (IS_GEN8(dev))
9366
			len += 2;
9409
			len += 2;
9367
	}
9410
	}
9368
 
9411
 
9369
	/*
9412
	/*
9370
	 * BSpec MI_DISPLAY_FLIP for IVB:
9413
	 * BSpec MI_DISPLAY_FLIP for IVB:
9371
	 * "The full packet must be contained within the same cache line."
9414
	 * "The full packet must be contained within the same cache line."
9372
	 *
9415
	 *
9373
	 * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same
9416
	 * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same
9374
	 * cacheline; if we ever start emitting more commands before
9417
	 * cacheline; if we ever start emitting more commands before
9375
	 * the MI_DISPLAY_FLIP we may need to first emit everything else,
9418
	 * the MI_DISPLAY_FLIP we may need to first emit everything else,
9376
	 * then do the cacheline alignment, and finally emit the
9419
	 * then do the cacheline alignment, and finally emit the
9377
	 * MI_DISPLAY_FLIP.
9420
	 * MI_DISPLAY_FLIP.
9378
	 */
9421
	 */
9379
	ret = intel_ring_cacheline_align(ring);
9422
	ret = intel_ring_cacheline_align(ring);
9380
	if (ret)
9423
	if (ret)
9381
		return ret;
9424
		return ret;
9382
 
9425
 
9383
	ret = intel_ring_begin(ring, len);
9426
	ret = intel_ring_begin(ring, len);
9384
	if (ret)
9427
	if (ret)
9385
		return ret;
9428
		return ret;
9386
 
9429
 
9387
	/* Unmask the flip-done completion message. Note that the bspec says that
9430
	/* Unmask the flip-done completion message. Note that the bspec says that
9388
	 * we should do this for both the BCS and RCS, and that we must not unmask
9431
	 * we should do this for both the BCS and RCS, and that we must not unmask
9389
	 * more than one flip event at any time (or ensure that only one flip message
9432
	 * more than one flip event at any time (or ensure that only one flip message
9390
	 * can be sent by waiting for flip-done prior to queueing new flips).
9433
	 * can be sent by waiting for flip-done prior to queueing new flips).
9391
	 * Experimentation says that BCS works despite DERRMR masking all
9434
	 * Experimentation says that BCS works despite DERRMR masking all
9392
	 * flip-done completion events and that unmasking all planes at once
9435
	 * flip-done completion events and that unmasking all planes at once
9393
	 * for the RCS also doesn't appear to drop events. Setting the DERRMR
9436
	 * for the RCS also doesn't appear to drop events. Setting the DERRMR
9394
	 * to zero does lead to lockups within MI_DISPLAY_FLIP.
9437
	 * to zero does lead to lockups within MI_DISPLAY_FLIP.
9395
	 */
9438
	 */
9396
	if (ring->id == RCS) {
9439
	if (ring->id == RCS) {
9397
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
9440
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
9398
		intel_ring_emit(ring, DERRMR);
9441
		intel_ring_emit(ring, DERRMR);
9399
		intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
9442
		intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
9400
					DERRMR_PIPEB_PRI_FLIP_DONE |
9443
					DERRMR_PIPEB_PRI_FLIP_DONE |
9401
					DERRMR_PIPEC_PRI_FLIP_DONE));
9444
					DERRMR_PIPEC_PRI_FLIP_DONE));
9402
		if (IS_GEN8(dev))
9445
		if (IS_GEN8(dev))
9403
			intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8(1) |
9446
			intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8(1) |
9404
					      MI_SRM_LRM_GLOBAL_GTT);
9447
					      MI_SRM_LRM_GLOBAL_GTT);
9405
		else
9448
		else
9406
		intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) |
9449
		intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) |
9407
				MI_SRM_LRM_GLOBAL_GTT);
9450
				MI_SRM_LRM_GLOBAL_GTT);
9408
		intel_ring_emit(ring, DERRMR);
9451
		intel_ring_emit(ring, DERRMR);
9409
		intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
9452
		intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
9410
		if (IS_GEN8(dev)) {
9453
		if (IS_GEN8(dev)) {
9411
			intel_ring_emit(ring, 0);
9454
			intel_ring_emit(ring, 0);
9412
			intel_ring_emit(ring, MI_NOOP);
9455
			intel_ring_emit(ring, MI_NOOP);
9413
		}
9456
		}
9414
	}
9457
	}
9415
 
9458
 
9416
	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
9459
	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
9417
	intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
9460
	intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
9418
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
9461
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
9419
	intel_ring_emit(ring, (MI_NOOP));
9462
	intel_ring_emit(ring, (MI_NOOP));
9420
 
9463
 
9421
	intel_mark_page_flip_active(intel_crtc);
9464
	intel_mark_page_flip_active(intel_crtc);
9422
	__intel_ring_advance(ring);
9465
	__intel_ring_advance(ring);
9423
	return 0;
9466
	return 0;
9424
}
9467
}
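/*
 * Illustrative sketch, not part of either driver revision: on the render
 * ring the flip above is emitted as LRI(DERRMR, ~flip-done bits) followed
 * by SRM(DERRMR -> ring scratch + 256), then MI_DISPLAY_FLIP_I915, the
 * pitch | tiling dword, the base address and an MI_NOOP.  That accounts
 * for the 4 + 6 dwords reserved in len, plus 2 more on Gen8 where the SRM
 * carries a 64-bit address and a trailing NOOP keeps the length even.
 */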
9425
 
9468
 
9426
static int intel_default_queue_flip(struct drm_device *dev,
9469
static int intel_default_queue_flip(struct drm_device *dev,
9427
				    struct drm_crtc *crtc,
9470
				    struct drm_crtc *crtc,
9428
				    struct drm_framebuffer *fb,
9471
				    struct drm_framebuffer *fb,
9429
				    struct drm_i915_gem_object *obj,
9472
				    struct drm_i915_gem_object *obj,
9430
				    struct intel_engine_cs *ring,
9473
				    struct intel_engine_cs *ring,
9431
				    uint32_t flags)
9474
				    uint32_t flags)
9432
{
9475
{
9433
	return -ENODEV;
9476
	return -ENODEV;
9434
}
9477
}
9435
 
9478
 
9436
static int intel_crtc_page_flip(struct drm_crtc *crtc,
9479
static int intel_crtc_page_flip(struct drm_crtc *crtc,
9437
				struct drm_framebuffer *fb,
9480
				struct drm_framebuffer *fb,
9438
				struct drm_pending_vblank_event *event,
9481
				struct drm_pending_vblank_event *event,
9439
				uint32_t page_flip_flags)
9482
				uint32_t page_flip_flags)
9440
{
9483
{
9441
	struct drm_device *dev = crtc->dev;
9484
	struct drm_device *dev = crtc->dev;
9442
	struct drm_i915_private *dev_priv = dev->dev_private;
9485
	struct drm_i915_private *dev_priv = dev->dev_private;
9443
	struct drm_framebuffer *old_fb = crtc->primary->fb;
9486
	struct drm_framebuffer *old_fb = crtc->primary->fb;
9444
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
9487
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
9445
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9488
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9446
	enum pipe pipe = intel_crtc->pipe;
9489
	enum pipe pipe = intel_crtc->pipe;
9447
	struct intel_unpin_work *work;
9490
	struct intel_unpin_work *work;
9448
	struct intel_engine_cs *ring;
9491
	struct intel_engine_cs *ring;
9449
	unsigned long flags;
-
 
9450
	int ret;
9492
	int ret;
9451
 
9493
 
9452
	/*
9494
	/*
9453
	 * drm_mode_page_flip_ioctl() should already catch this, but double
9495
	 * drm_mode_page_flip_ioctl() should already catch this, but double
9454
	 * check to be safe.  In the future we may enable pageflipping from
9496
	 * check to be safe.  In the future we may enable pageflipping from
9455
	 * a disabled primary plane.
9497
	 * a disabled primary plane.
9456
	 */
9498
	 */
9457
	if (WARN_ON(intel_fb_obj(old_fb) == NULL))
9499
	if (WARN_ON(intel_fb_obj(old_fb) == NULL))
9458
		return -EBUSY;
9500
		return -EBUSY;
9459
 
9501
 
9460
	/* Can't change pixel format via MI display flips. */
9502
	/* Can't change pixel format via MI display flips. */
9461
	if (fb->pixel_format != crtc->primary->fb->pixel_format)
9503
	if (fb->pixel_format != crtc->primary->fb->pixel_format)
9462
		return -EINVAL;
9504
		return -EINVAL;
9463
 
9505
 
9464
	/*
9506
	/*
9465
	 * TILEOFF/LINOFF registers can't be changed via MI display flips.
9507
	 * TILEOFF/LINOFF registers can't be changed via MI display flips.
9466
	 * Note that pitch changes could also affect these registers.
9508
	 * Note that pitch changes could also affect these registers.
9467
	 */
9509
	 */
9468
	if (INTEL_INFO(dev)->gen > 3 &&
9510
	if (INTEL_INFO(dev)->gen > 3 &&
9469
	    (fb->offsets[0] != crtc->primary->fb->offsets[0] ||
9511
	    (fb->offsets[0] != crtc->primary->fb->offsets[0] ||
9470
	     fb->pitches[0] != crtc->primary->fb->pitches[0]))
9512
	     fb->pitches[0] != crtc->primary->fb->pitches[0]))
9471
		return -EINVAL;
9513
		return -EINVAL;
-
 
9514
 
-
 
9515
	if (i915_terminally_wedged(&dev_priv->gpu_error))
-
 
9516
		goto out_hang;
9472
 
9517
 
9473
	work = kzalloc(sizeof(*work), GFP_KERNEL);
9518
	work = kzalloc(sizeof(*work), GFP_KERNEL);
9474
	if (work == NULL)
9519
	if (work == NULL)
9475
		return -ENOMEM;
9520
		return -ENOMEM;
9476
 
9521
 
9477
	work->event = event;
9522
	work->event = event;
9478
	work->crtc = crtc;
9523
	work->crtc = crtc;
9479
	work->old_fb_obj = intel_fb_obj(old_fb);
9524
	work->old_fb_obj = intel_fb_obj(old_fb);
9480
	INIT_WORK(&work->work, intel_unpin_work_fn);
9525
	INIT_WORK(&work->work, intel_unpin_work_fn);
9481
 
9526
 
9482
	ret = drm_crtc_vblank_get(crtc);
9527
	ret = drm_crtc_vblank_get(crtc);
9483
	if (ret)
9528
	if (ret)
9484
		goto free_work;
9529
		goto free_work;
9485
 
9530
 
9486
	/* We borrow the event spin lock for protecting unpin_work */
9531
	/* We borrow the event spin lock for protecting unpin_work */
9487
	spin_lock_irqsave(&dev->event_lock, flags);
9532
	spin_lock_irq(&dev->event_lock);
9488
	if (intel_crtc->unpin_work) {
9533
	if (intel_crtc->unpin_work) {
9489
		spin_unlock_irqrestore(&dev->event_lock, flags);
9534
		/* Before declaring the flip queue wedged, check if
-
 
9535
		 * the hardware completed the operation behind our backs.
9490
		kfree(work);
9536
		 */
-
 
9537
		if (__intel_pageflip_stall_check(dev, crtc)) {
-
 
9538
			DRM_DEBUG_DRIVER("flip queue: previous flip completed, continuing\n");
9491
		drm_crtc_vblank_put(crtc);
9539
			page_flip_completed(intel_crtc);
9492
 
9540
		} else {
9493
		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
9541
			DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
-
 
9542
			spin_unlock_irq(&dev->event_lock);
-
 
9543
 
-
 
9544
			drm_crtc_vblank_put(crtc);
-
 
9545
		kfree(work);
9494
		return -EBUSY;
9546
		return -EBUSY;
9495
	}
9547
	}
-
 
9548
	}
9496
	intel_crtc->unpin_work = work;
9549
	intel_crtc->unpin_work = work;
9497
	spin_unlock_irqrestore(&dev->event_lock, flags);
9550
	spin_unlock_irq(&dev->event_lock);
9498
 
9551
 
9499
	if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
9552
	if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
9500
		flush_workqueue(dev_priv->wq);
9553
		flush_workqueue(dev_priv->wq);
9501
 
9554
 
9502
	ret = i915_mutex_lock_interruptible(dev);
9555
	ret = i915_mutex_lock_interruptible(dev);
9503
	if (ret)
9556
	if (ret)
9504
		goto cleanup;
9557
		goto cleanup;
9505
 
9558
 
9506
	/* Reference the objects for the scheduled work. */
9559
	/* Reference the objects for the scheduled work. */
9507
	drm_gem_object_reference(&work->old_fb_obj->base);
9560
	drm_gem_object_reference(&work->old_fb_obj->base);
9508
	drm_gem_object_reference(&obj->base);
9561
	drm_gem_object_reference(&obj->base);
9509
 
9562
 
9510
	crtc->primary->fb = fb;
9563
	crtc->primary->fb = fb;
9511
 
9564
 
9512
	work->pending_flip_obj = obj;
9565
	work->pending_flip_obj = obj;
9513
 
-
 
9514
	work->enable_stall_check = true;
-
 
9515
 
9566
 
9516
	atomic_inc(&intel_crtc->unpin_work_count);
9567
	atomic_inc(&intel_crtc->unpin_work_count);
9517
	intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
9568
	intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
9518
 
9569
 
9519
	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
9570
	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
9520
		work->flip_count = I915_READ(PIPE_FLIPCOUNT_GM45(pipe)) + 1;
9571
		work->flip_count = I915_READ(PIPE_FLIPCOUNT_GM45(pipe)) + 1;
9521
 
9572
 
9522
	if (IS_VALLEYVIEW(dev)) {
9573
	if (IS_VALLEYVIEW(dev)) {
9523
		ring = &dev_priv->ring[BCS];
9574
		ring = &dev_priv->ring[BCS];
9524
		if (obj->tiling_mode != work->old_fb_obj->tiling_mode)
9575
		if (obj->tiling_mode != work->old_fb_obj->tiling_mode)
9525
			/* vlv: DISPLAY_FLIP fails to change tiling */
9576
			/* vlv: DISPLAY_FLIP fails to change tiling */
9526
			ring = NULL;
9577
			ring = NULL;
9527
	} else if (IS_IVYBRIDGE(dev)) {
9578
	} else if (IS_IVYBRIDGE(dev)) {
9528
		ring = &dev_priv->ring[BCS];
9579
		ring = &dev_priv->ring[BCS];
9529
	} else if (INTEL_INFO(dev)->gen >= 7) {
9580
	} else if (INTEL_INFO(dev)->gen >= 7) {
9530
		ring = obj->ring;
9581
		ring = obj->ring;
9531
		if (ring == NULL || ring->id != RCS)
9582
		if (ring == NULL || ring->id != RCS)
9532
			ring = &dev_priv->ring[BCS];
9583
			ring = &dev_priv->ring[BCS];
9533
	} else {
9584
	} else {
9534
		ring = &dev_priv->ring[RCS];
9585
		ring = &dev_priv->ring[RCS];
9535
	}
9586
	}
9536
 
9587
 
9537
	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
9588
	ret = intel_pin_and_fence_fb_obj(crtc->primary, fb, ring);
9538
	if (ret)
9589
	if (ret)
9539
		goto cleanup_pending;
9590
		goto cleanup_pending;
9540
 
9591
 
9541
	work->gtt_offset =
9592
	work->gtt_offset =
9542
		i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset;
9593
		i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset;
9543
 
9594
 
9544
	if (use_mmio_flip(ring, obj))
9595
	if (use_mmio_flip(ring, obj)) {
9545
		ret = intel_queue_mmio_flip(dev, crtc, fb, obj, ring,
9596
		ret = intel_queue_mmio_flip(dev, crtc, fb, obj, ring,
-
 
9597
					    page_flip_flags);
-
 
9598
		if (ret)
-
 
9599
			goto cleanup_unpin;
-
 
9600
 
-
 
9601
		work->flip_queued_seqno = obj->last_write_seqno;
9546
					    page_flip_flags);
9602
		work->flip_queued_ring = obj->ring;
9547
	else
9603
	} else {
9548
		ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring,
9604
		ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring,
9549
				page_flip_flags);
9605
				page_flip_flags);
9550
	if (ret)
9606
	if (ret)
9551
		goto cleanup_unpin;
9607
		goto cleanup_unpin;
-
 
9608
 
-
 
9609
		work->flip_queued_seqno = intel_ring_get_seqno(ring);
-
 
9610
		work->flip_queued_ring = ring;
-
 
9611
	}
-
 
9612
 
-
 
9613
	work->flip_queued_vblank = drm_vblank_count(dev, intel_crtc->pipe);
-
 
9614
	work->enable_stall_check = true;
9552
 
9615
 
9553
	i915_gem_track_fb(work->old_fb_obj, obj,
9616
	i915_gem_track_fb(work->old_fb_obj, obj,
9554
			  INTEL_FRONTBUFFER_PRIMARY(pipe));
9617
			  INTEL_FRONTBUFFER_PRIMARY(pipe));
9555
 
9618
 
9556
	intel_disable_fbc(dev);
9619
	intel_disable_fbc(dev);
9557
	intel_frontbuffer_flip_prepare(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
9620
	intel_frontbuffer_flip_prepare(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
9558
	mutex_unlock(&dev->struct_mutex);
9621
	mutex_unlock(&dev->struct_mutex);
9559
 
9622
 
9560
	trace_i915_flip_request(intel_crtc->plane, obj);
9623
	trace_i915_flip_request(intel_crtc->plane, obj);
9561
 
9624
 
9562
	return 0;
9625
	return 0;
9563
 
9626
 
9564
cleanup_unpin:
9627
cleanup_unpin:
9565
	intel_unpin_fb_obj(obj);
9628
	intel_unpin_fb_obj(obj);
9566
cleanup_pending:
9629
cleanup_pending:
9567
	atomic_dec(&intel_crtc->unpin_work_count);
9630
	atomic_dec(&intel_crtc->unpin_work_count);
9568
	crtc->primary->fb = old_fb;
9631
	crtc->primary->fb = old_fb;
9569
	drm_gem_object_unreference(&work->old_fb_obj->base);
9632
	drm_gem_object_unreference(&work->old_fb_obj->base);
9570
	drm_gem_object_unreference(&obj->base);
9633
	drm_gem_object_unreference(&obj->base);
9571
	mutex_unlock(&dev->struct_mutex);
9634
	mutex_unlock(&dev->struct_mutex);
9572
 
9635
 
9573
cleanup:
9636
cleanup:
9574
	spin_lock_irqsave(&dev->event_lock, flags);
9637
	spin_lock_irq(&dev->event_lock);
9575
	intel_crtc->unpin_work = NULL;
9638
	intel_crtc->unpin_work = NULL;
9576
	spin_unlock_irqrestore(&dev->event_lock, flags);
9639
	spin_unlock_irq(&dev->event_lock);
9577
 
9640
 
9578
	drm_crtc_vblank_put(crtc);
9641
	drm_crtc_vblank_put(crtc);
9579
free_work:
9642
free_work:
9580
	kfree(work);
9643
	kfree(work);
9581
 
9644
 
9582
	if (ret == -EIO) {
9645
	if (ret == -EIO) {
9583
out_hang:
9646
out_hang:
9584
		intel_crtc_wait_for_pending_flips(crtc);
9647
//       intel_crtc_wait_for_pending_flips(crtc);
9585
		ret = intel_pipe_set_base(crtc, crtc->x, crtc->y, fb);
9648
		ret = intel_pipe_set_base(crtc, crtc->x, crtc->y, fb);
9586
		if (ret == 0 && event)
9649
		if (ret == 0 && event) {
-
 
9650
			spin_lock_irq(&dev->event_lock);
9587
			drm_send_vblank_event(dev, pipe, event);
9651
			drm_send_vblank_event(dev, pipe, event);
-
 
9652
			spin_unlock_irq(&dev->event_lock);
-
 
9653
		}
9588
	}
9654
	}
9589
	return ret;
9655
	return ret;
9590
}
9656
}
9591
#endif
9657
#endif
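/*
 * Illustrative sketch, not part of either driver revision: a condensed
 * view of the ring selection performed in intel_crtc_page_flip() above.
 * The enum and helper names are stand-ins, not driver symbols; a "none"
 * result corresponds to the NULL ring that steers the flip towards the
 * MMIO path checked by use_mmio_flip().
 */
enum flip_engine_example { EXAMPLE_RCS, EXAMPLE_BCS, EXAMPLE_NONE };

static enum flip_engine_example
pick_flip_engine_example(bool is_vlv, bool is_ivb, int gen,
			 bool tiling_changes, bool obj_last_ring_is_rcs)
{
	if (is_vlv)
		/* vlv: DISPLAY_FLIP fails to change tiling */
		return tiling_changes ? EXAMPLE_NONE : EXAMPLE_BCS;
	if (is_ivb)
		return EXAMPLE_BCS;
	if (gen >= 7)
		/* keep the object's ring only if that ring is the RCS */
		return obj_last_ring_is_rcs ? EXAMPLE_RCS : EXAMPLE_BCS;
	return EXAMPLE_RCS;
}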
9592
 
9658
 
9593
static struct drm_crtc_helper_funcs intel_helper_funcs = {
9659
static struct drm_crtc_helper_funcs intel_helper_funcs = {
9594
	.mode_set_base_atomic = intel_pipe_set_base_atomic,
9660
	.mode_set_base_atomic = intel_pipe_set_base_atomic,
9595
	.load_lut = intel_crtc_load_lut,
9661
	.load_lut = intel_crtc_load_lut,
9596
};
9662
};
9597
 
9663
 
9598
/**
9664
/**
9599
 * intel_modeset_update_staged_output_state
9665
 * intel_modeset_update_staged_output_state
9600
 *
9666
 *
9601
 * Updates the staged output configuration state, e.g. after we've read out the
9667
 * Updates the staged output configuration state, e.g. after we've read out the
9602
 * current hw state.
9668
 * current hw state.
9603
 */
9669
 */
9604
static void intel_modeset_update_staged_output_state(struct drm_device *dev)
9670
static void intel_modeset_update_staged_output_state(struct drm_device *dev)
9605
{
9671
{
9606
	struct intel_crtc *crtc;
9672
	struct intel_crtc *crtc;
9607
	struct intel_encoder *encoder;
9673
	struct intel_encoder *encoder;
9608
	struct intel_connector *connector;
9674
	struct intel_connector *connector;
9609
 
9675
 
9610
	list_for_each_entry(connector, &dev->mode_config.connector_list,
9676
	list_for_each_entry(connector, &dev->mode_config.connector_list,
9611
			    base.head) {
9677
			    base.head) {
9612
		connector->new_encoder =
9678
		connector->new_encoder =
9613
			to_intel_encoder(connector->base.encoder);
9679
			to_intel_encoder(connector->base.encoder);
9614
	}
9680
	}
9615
 
9681
 
9616
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
-
 
9617
			    base.head) {
9682
	for_each_intel_encoder(dev, encoder) {
9618
		encoder->new_crtc =
9683
		encoder->new_crtc =
9619
			to_intel_crtc(encoder->base.crtc);
9684
			to_intel_crtc(encoder->base.crtc);
9620
	}
9685
	}
9621
 
9686
 
9622
	for_each_intel_crtc(dev, crtc) {
9687
	for_each_intel_crtc(dev, crtc) {
9623
		crtc->new_enabled = crtc->base.enabled;
9688
		crtc->new_enabled = crtc->base.enabled;
9624
 
9689
 
9625
		if (crtc->new_enabled)
9690
		if (crtc->new_enabled)
9626
			crtc->new_config = &crtc->config;
9691
			crtc->new_config = &crtc->config;
9627
		else
9692
		else
9628
			crtc->new_config = NULL;
9693
			crtc->new_config = NULL;
9629
	}
9694
	}
9630
}
9695
}
9631
 
9696
 
9632
/**
9697
/**
9633
 * intel_modeset_commit_output_state
9698
 * intel_modeset_commit_output_state
9634
 *
9699
 *
9635
 * This function copies the staged display pipe configuration to the real one.
9700
 * This function copies the staged display pipe configuration to the real one.
9636
 */
9701
 */
9637
static void intel_modeset_commit_output_state(struct drm_device *dev)
9702
static void intel_modeset_commit_output_state(struct drm_device *dev)
9638
{
9703
{
9639
	struct intel_crtc *crtc;
9704
	struct intel_crtc *crtc;
9640
	struct intel_encoder *encoder;
9705
	struct intel_encoder *encoder;
9641
	struct intel_connector *connector;
9706
	struct intel_connector *connector;
9642
 
9707
 
9643
	list_for_each_entry(connector, &dev->mode_config.connector_list,
9708
	list_for_each_entry(connector, &dev->mode_config.connector_list,
9644
			    base.head) {
9709
			    base.head) {
9645
		connector->base.encoder = &connector->new_encoder->base;
9710
		connector->base.encoder = &connector->new_encoder->base;
9646
	}
9711
	}
9647
 
9712
 
9648
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
-
 
9649
			    base.head) {
9713
	for_each_intel_encoder(dev, encoder) {
9650
		encoder->base.crtc = &encoder->new_crtc->base;
9714
		encoder->base.crtc = &encoder->new_crtc->base;
9651
	}
9715
	}
9652
 
9716
 
9653
	for_each_intel_crtc(dev, crtc) {
9717
	for_each_intel_crtc(dev, crtc) {
9654
		crtc->base.enabled = crtc->new_enabled;
9718
		crtc->base.enabled = crtc->new_enabled;
9655
	}
9719
	}
9656
}
9720
}
9657
 
9721
 
9658
static void
9722
static void
9659
connected_sink_compute_bpp(struct intel_connector *connector,
9723
connected_sink_compute_bpp(struct intel_connector *connector,
9660
			   struct intel_crtc_config *pipe_config)
9724
			   struct intel_crtc_config *pipe_config)
9661
{
9725
{
9662
	int bpp = pipe_config->pipe_bpp;
9726
	int bpp = pipe_config->pipe_bpp;
9663
 
9727
 
9664
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constrains\n",
9728
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constrains\n",
9665
		connector->base.base.id,
9729
		connector->base.base.id,
9666
		connector->base.name);
9730
		connector->base.name);
9667
 
9731
 
9668
	/* Don't use an invalid EDID bpc value */
9732
	/* Don't use an invalid EDID bpc value */
9669
	if (connector->base.display_info.bpc &&
9733
	if (connector->base.display_info.bpc &&
9670
	    connector->base.display_info.bpc * 3 < bpp) {
9734
	    connector->base.display_info.bpc * 3 < bpp) {
9671
		DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
9735
		DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
9672
			      bpp, connector->base.display_info.bpc*3);
9736
			      bpp, connector->base.display_info.bpc*3);
9673
		pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
9737
		pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
9674
	}
9738
	}
9675
 
9739
 
9676
	/* Clamp bpp to 8 on screens without EDID 1.4 */
9740
	/* Clamp bpp to 8 on screens without EDID 1.4 */
9677
	if (connector->base.display_info.bpc == 0 && bpp > 24) {
9741
	if (connector->base.display_info.bpc == 0 && bpp > 24) {
9678
		DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
9742
		DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
9679
			      bpp);
9743
			      bpp);
9680
		pipe_config->pipe_bpp = 24;
9744
		pipe_config->pipe_bpp = 24;
9681
	}
9745
	}
9682
}
9746
}
9683
 
9747
 
9684
static int
9748
static int
9685
compute_baseline_pipe_bpp(struct intel_crtc *crtc,
9749
compute_baseline_pipe_bpp(struct intel_crtc *crtc,
9686
		    struct drm_framebuffer *fb,
9750
		    struct drm_framebuffer *fb,
9687
		    struct intel_crtc_config *pipe_config)
9751
		    struct intel_crtc_config *pipe_config)
9688
{
9752
{
9689
	struct drm_device *dev = crtc->base.dev;
9753
	struct drm_device *dev = crtc->base.dev;
9690
	struct intel_connector *connector;
9754
	struct intel_connector *connector;
9691
	int bpp;
9755
	int bpp;
9692
 
9756
 
9693
	switch (fb->pixel_format) {
9757
	switch (fb->pixel_format) {
9694
	case DRM_FORMAT_C8:
9758
	case DRM_FORMAT_C8:
9695
		bpp = 8*3; /* since we go through a colormap */
9759
		bpp = 8*3; /* since we go through a colormap */
9696
		break;
9760
		break;
9697
	case DRM_FORMAT_XRGB1555:
9761
	case DRM_FORMAT_XRGB1555:
9698
	case DRM_FORMAT_ARGB1555:
9762
	case DRM_FORMAT_ARGB1555:
9699
		/* checked in intel_framebuffer_init already */
9763
		/* checked in intel_framebuffer_init already */
9700
		if (WARN_ON(INTEL_INFO(dev)->gen > 3))
9764
		if (WARN_ON(INTEL_INFO(dev)->gen > 3))
9701
			return -EINVAL;
9765
			return -EINVAL;
9702
	case DRM_FORMAT_RGB565:
9766
	case DRM_FORMAT_RGB565:
9703
		bpp = 6*3; /* min is 18bpp */
9767
		bpp = 6*3; /* min is 18bpp */
9704
		break;
9768
		break;
9705
	case DRM_FORMAT_XBGR8888:
9769
	case DRM_FORMAT_XBGR8888:
9706
	case DRM_FORMAT_ABGR8888:
9770
	case DRM_FORMAT_ABGR8888:
9707
		/* checked in intel_framebuffer_init already */
9771
		/* checked in intel_framebuffer_init already */
9708
		if (WARN_ON(INTEL_INFO(dev)->gen < 4))
9772
		if (WARN_ON(INTEL_INFO(dev)->gen < 4))
9709
			return -EINVAL;
9773
			return -EINVAL;
9710
	case DRM_FORMAT_XRGB8888:
9774
	case DRM_FORMAT_XRGB8888:
9711
	case DRM_FORMAT_ARGB8888:
9775
	case DRM_FORMAT_ARGB8888:
9712
		bpp = 8*3;
9776
		bpp = 8*3;
9713
		break;
9777
		break;
9714
	case DRM_FORMAT_XRGB2101010:
9778
	case DRM_FORMAT_XRGB2101010:
9715
	case DRM_FORMAT_ARGB2101010:
9779
	case DRM_FORMAT_ARGB2101010:
9716
	case DRM_FORMAT_XBGR2101010:
9780
	case DRM_FORMAT_XBGR2101010:
9717
	case DRM_FORMAT_ABGR2101010:
9781
	case DRM_FORMAT_ABGR2101010:
9718
		/* checked in intel_framebuffer_init already */
9782
		/* checked in intel_framebuffer_init already */
9719
		if (WARN_ON(INTEL_INFO(dev)->gen < 4))
9783
		if (WARN_ON(INTEL_INFO(dev)->gen < 4))
9720
			return -EINVAL;
9784
			return -EINVAL;
9721
		bpp = 10*3;
9785
		bpp = 10*3;
9722
		break;
9786
		break;
9723
	/* TODO: gen4+ supports 16 bpc floating point, too. */
9787
	/* TODO: gen4+ supports 16 bpc floating point, too. */
9724
	default:
9788
	default:
9725
		DRM_DEBUG_KMS("unsupported depth\n");
9789
		DRM_DEBUG_KMS("unsupported depth\n");
9726
		return -EINVAL;
9790
		return -EINVAL;
9727
	}
9791
	}
9728
 
9792
 
9729
	pipe_config->pipe_bpp = bpp;
9793
	pipe_config->pipe_bpp = bpp;
9730
 
9794
 
9731
	/* Clamp display bpp to EDID value */
9795
	/* Clamp display bpp to EDID value */
9732
	list_for_each_entry(connector, &dev->mode_config.connector_list,
9796
	list_for_each_entry(connector, &dev->mode_config.connector_list,
9733
			    base.head) {
9797
			    base.head) {
9734
		if (!connector->new_encoder ||
9798
		if (!connector->new_encoder ||
9735
		    connector->new_encoder->new_crtc != crtc)
9799
		    connector->new_encoder->new_crtc != crtc)
9736
			continue;
9800
			continue;
9737
 
9801
 
9738
		connected_sink_compute_bpp(connector, pipe_config);
9802
		connected_sink_compute_bpp(connector, pipe_config);
9739
	}
9803
	}
9740
 
9804
 
9741
	return bpp;
9805
	return bpp;
9742
}
9806
}
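/*
 * Illustrative sketch, not part of either driver revision: pipe_bpp is
 * three times the per-channel depth, so an XRGB8888 plane starts at
 * 8*3 = 24, a sink whose EDID reports 6 bpc clamps that to 18, and a sink
 * with no usable EDID bpc value is capped at 24.  A minimal standalone
 * version of the clamp applied for each connected sink:
 */
static int clamp_pipe_bpp_example(int plane_bpp, int sink_bpc)
{
	int bpp = plane_bpp;

	if (sink_bpc && sink_bpc * 3 < bpp)	/* EDID-reported limit wins */
		bpp = sink_bpc * 3;
	if (sink_bpc == 0 && bpp > 24)		/* no EDID 1.4 data: cap at 24 */
		bpp = 24;

	return bpp;
}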
9743
 
9807
 
9744
static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
9808
static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
9745
{
9809
{
9746
	DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
9810
	DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
9747
			"type: 0x%x flags: 0x%x\n",
9811
			"type: 0x%x flags: 0x%x\n",
9748
		mode->crtc_clock,
9812
		mode->crtc_clock,
9749
		mode->crtc_hdisplay, mode->crtc_hsync_start,
9813
		mode->crtc_hdisplay, mode->crtc_hsync_start,
9750
		mode->crtc_hsync_end, mode->crtc_htotal,
9814
		mode->crtc_hsync_end, mode->crtc_htotal,
9751
		mode->crtc_vdisplay, mode->crtc_vsync_start,
9815
		mode->crtc_vdisplay, mode->crtc_vsync_start,
9752
		mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
9816
		mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
9753
}
9817
}
9754
 
9818
 
9755
static void intel_dump_pipe_config(struct intel_crtc *crtc,
9819
static void intel_dump_pipe_config(struct intel_crtc *crtc,
9756
				   struct intel_crtc_config *pipe_config,
9820
				   struct intel_crtc_config *pipe_config,
9757
				   const char *context)
9821
				   const char *context)
9758
{
9822
{
9759
	DRM_DEBUG_KMS("[CRTC:%d]%s config for pipe %c\n", crtc->base.base.id,
9823
	DRM_DEBUG_KMS("[CRTC:%d]%s config for pipe %c\n", crtc->base.base.id,
9760
		      context, pipe_name(crtc->pipe));
9824
		      context, pipe_name(crtc->pipe));
9761
 
9825
 
9762
	DRM_DEBUG_KMS("cpu_transcoder: %c\n", transcoder_name(pipe_config->cpu_transcoder));
9826
	DRM_DEBUG_KMS("cpu_transcoder: %c\n", transcoder_name(pipe_config->cpu_transcoder));
9763
	DRM_DEBUG_KMS("pipe bpp: %i, dithering: %i\n",
9827
	DRM_DEBUG_KMS("pipe bpp: %i, dithering: %i\n",
9764
		      pipe_config->pipe_bpp, pipe_config->dither);
9828
		      pipe_config->pipe_bpp, pipe_config->dither);
9765
	DRM_DEBUG_KMS("fdi/pch: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
9829
	DRM_DEBUG_KMS("fdi/pch: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
9766
		      pipe_config->has_pch_encoder,
9830
		      pipe_config->has_pch_encoder,
9767
		      pipe_config->fdi_lanes,
9831
		      pipe_config->fdi_lanes,
9768
		      pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
9832
		      pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
9769
		      pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
9833
		      pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
9770
		      pipe_config->fdi_m_n.tu);
9834
		      pipe_config->fdi_m_n.tu);
9771
	DRM_DEBUG_KMS("dp: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
9835
	DRM_DEBUG_KMS("dp: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
9772
		      pipe_config->has_dp_encoder,
9836
		      pipe_config->has_dp_encoder,
9773
		      pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
9837
		      pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
9774
		      pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
9838
		      pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
9775
		      pipe_config->dp_m_n.tu);
9839
		      pipe_config->dp_m_n.tu);
-
 
9840
 
-
 
9841
	DRM_DEBUG_KMS("dp: %i, gmch_m2: %u, gmch_n2: %u, link_m2: %u, link_n2: %u, tu2: %u\n",
-
 
9842
		      pipe_config->has_dp_encoder,
-
 
9843
		      pipe_config->dp_m2_n2.gmch_m,
-
 
9844
		      pipe_config->dp_m2_n2.gmch_n,
-
 
9845
		      pipe_config->dp_m2_n2.link_m,
-
 
9846
		      pipe_config->dp_m2_n2.link_n,
-
 
9847
		      pipe_config->dp_m2_n2.tu);
-
 
9848
 
-
 
9849
	DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
-
 
9850
		      pipe_config->has_audio,
-
 
9851
		      pipe_config->has_infoframe);
-
 
9852
 
9776
	DRM_DEBUG_KMS("requested mode:\n");
9853
	DRM_DEBUG_KMS("requested mode:\n");
9777
	drm_mode_debug_printmodeline(&pipe_config->requested_mode);
9854
	drm_mode_debug_printmodeline(&pipe_config->requested_mode);
9778
	DRM_DEBUG_KMS("adjusted mode:\n");
9855
	DRM_DEBUG_KMS("adjusted mode:\n");
9779
	drm_mode_debug_printmodeline(&pipe_config->adjusted_mode);
9856
	drm_mode_debug_printmodeline(&pipe_config->adjusted_mode);
9780
	intel_dump_crtc_timings(&pipe_config->adjusted_mode);
9857
	intel_dump_crtc_timings(&pipe_config->adjusted_mode);
9781
	DRM_DEBUG_KMS("port clock: %d\n", pipe_config->port_clock);
9858
	DRM_DEBUG_KMS("port clock: %d\n", pipe_config->port_clock);
9782
	DRM_DEBUG_KMS("pipe src size: %dx%d\n",
9859
	DRM_DEBUG_KMS("pipe src size: %dx%d\n",
9783
		      pipe_config->pipe_src_w, pipe_config->pipe_src_h);
9860
		      pipe_config->pipe_src_w, pipe_config->pipe_src_h);
9784
	DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
9861
	DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
9785
		      pipe_config->gmch_pfit.control,
9862
		      pipe_config->gmch_pfit.control,
9786
		      pipe_config->gmch_pfit.pgm_ratios,
9863
		      pipe_config->gmch_pfit.pgm_ratios,
9787
		      pipe_config->gmch_pfit.lvds_border_bits);
9864
		      pipe_config->gmch_pfit.lvds_border_bits);
9788
	DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
9865
	DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
9789
		      pipe_config->pch_pfit.pos,
9866
		      pipe_config->pch_pfit.pos,
9790
		      pipe_config->pch_pfit.size,
9867
		      pipe_config->pch_pfit.size,
9791
		      pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
9868
		      pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
9792
	DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
9869
	DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
9793
	DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);
9870
	DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);
9794
}
9871
}
9795
 
9872
 
9796
static bool encoders_cloneable(const struct intel_encoder *a,
9873
static bool encoders_cloneable(const struct intel_encoder *a,
9797
			       const struct intel_encoder *b)
9874
			       const struct intel_encoder *b)
9798
{
9875
{
9799
	/* masks could be asymmetric, so check both ways */
9876
	/* masks could be asymmetric, so check both ways */
9800
	return a == b || (a->cloneable & (1 << b->type) &&
9877
	return a == b || (a->cloneable & (1 << b->type) &&
9801
			  b->cloneable & (1 << a->type));
9878
			  b->cloneable & (1 << a->type));
9802
}
9879
}
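/*
 * Illustrative sketch, not part of either driver revision: cloneable is a
 * bitmask of encoder types an encoder accepts on the same CRTC.  With
 * hypothetical types 2 and 5, a->cloneable = 1 << 5 but b->cloneable = 0
 * means a would accept b while b would not accept a, so the pair is
 * rejected; that asymmetry is why both directions are tested.
 */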
9803
 
9880
 
9804
static bool check_single_encoder_cloning(struct intel_crtc *crtc,
9881
static bool check_single_encoder_cloning(struct intel_crtc *crtc,
9805
					 struct intel_encoder *encoder)
9882
					 struct intel_encoder *encoder)
9806
{
9883
{
9807
	struct drm_device *dev = crtc->base.dev;
9884
	struct drm_device *dev = crtc->base.dev;
9808
	struct intel_encoder *source_encoder;
9885
	struct intel_encoder *source_encoder;
9809
 
9886
 
9810
	list_for_each_entry(source_encoder,
-
 
9811
			    &dev->mode_config.encoder_list, base.head) {
9887
	for_each_intel_encoder(dev, source_encoder) {
9812
		if (source_encoder->new_crtc != crtc)
9888
		if (source_encoder->new_crtc != crtc)
9813
			continue;
9889
			continue;
9814
 
9890
 
9815
		if (!encoders_cloneable(encoder, source_encoder))
9891
		if (!encoders_cloneable(encoder, source_encoder))
9816
			return false;
9892
			return false;
9817
	}
9893
	}
9818
 
9894
 
9819
	return true;
9895
	return true;
9820
}
9896
}
9821
 
9897
 
9822
static bool check_encoder_cloning(struct intel_crtc *crtc)
9898
static bool check_encoder_cloning(struct intel_crtc *crtc)
9823
{
9899
{
9824
	struct drm_device *dev = crtc->base.dev;
9900
	struct drm_device *dev = crtc->base.dev;
9825
	struct intel_encoder *encoder;
9901
	struct intel_encoder *encoder;
9826
 
9902
 
9827
	list_for_each_entry(encoder,
-
 
9828
			    &dev->mode_config.encoder_list, base.head) {
9903
	for_each_intel_encoder(dev, encoder) {
9829
		if (encoder->new_crtc != crtc)
9904
		if (encoder->new_crtc != crtc)
9830
			continue;
9905
			continue;
9831
 
9906
 
9832
		if (!check_single_encoder_cloning(crtc, encoder))
9907
		if (!check_single_encoder_cloning(crtc, encoder))
9833
			return false;
9908
			return false;
9834
	}
9909
	}
9835
 
9910
 
9836
	return true;
9911
	return true;
9837
}
9912
}
-
 
9913
 
-
 
9914
static bool check_digital_port_conflicts(struct drm_device *dev)
-
 
9915
{
-
 
9916
	struct intel_connector *connector;
-
 
9917
	unsigned int used_ports = 0;
-
 
9918
 
-
 
9919
	/*
-
 
9920
	 * Walk the connector list instead of the encoder
-
 
9921
	 * list to detect the problem on ddi platforms
-
 
9922
	 * where there's just one encoder per digital port.
-
 
9923
	 */
-
 
9924
	list_for_each_entry(connector,
-
 
9925
			    &dev->mode_config.connector_list, base.head) {
-
 
9926
		struct intel_encoder *encoder = connector->new_encoder;
-
 
9927
 
-
 
9928
		if (!encoder)
-
 
9929
			continue;
-
 
9930
 
-
 
9931
		WARN_ON(!encoder->new_crtc);
-
 
9932
 
-
 
9933
		switch (encoder->type) {
-
 
9934
			unsigned int port_mask;
-
 
9935
		case INTEL_OUTPUT_UNKNOWN:
-
 
9936
			if (WARN_ON(!HAS_DDI(dev)))
-
 
9937
				break;
-
 
9938
		case INTEL_OUTPUT_DISPLAYPORT:
-
 
9939
		case INTEL_OUTPUT_HDMI:
-
 
9940
		case INTEL_OUTPUT_EDP:
-
 
9941
			port_mask = 1 << enc_to_dig_port(&encoder->base)->port;
-
 
9942
 
-
 
9943
			/* the same port mustn't appear more than once */
-
 
9944
			if (used_ports & port_mask)
-
 
9945
				return false;
-
 
9946
 
-
 
9947
			used_ports |= port_mask;
-
 
9948
		default:
-
 
9949
			break;
-
 
9950
		}
-
 
9951
	}
-
 
9952
 
-
 
9953
	return true;
-
 
9954
}
9838
 
9955
 
9839
static struct intel_crtc_config *
9956
static struct intel_crtc_config *
9840
intel_modeset_pipe_config(struct drm_crtc *crtc,
9957
intel_modeset_pipe_config(struct drm_crtc *crtc,
9841
			  struct drm_framebuffer *fb,
9958
			  struct drm_framebuffer *fb,
9842
			    struct drm_display_mode *mode)
9959
			    struct drm_display_mode *mode)
9843
{
9960
{
9844
	struct drm_device *dev = crtc->dev;
9961
	struct drm_device *dev = crtc->dev;
9845
	struct intel_encoder *encoder;
9962
	struct intel_encoder *encoder;
9846
	struct intel_crtc_config *pipe_config;
9963
	struct intel_crtc_config *pipe_config;
9847
	int plane_bpp, ret = -EINVAL;
9964
	int plane_bpp, ret = -EINVAL;
9848
	bool retry = true;
9965
	bool retry = true;
9849
 
9966
 
9850
	if (!check_encoder_cloning(to_intel_crtc(crtc))) {
9967
	if (!check_encoder_cloning(to_intel_crtc(crtc))) {
9851
		DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
9968
		DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
9852
		return ERR_PTR(-EINVAL);
9969
		return ERR_PTR(-EINVAL);
9853
	}
9970
	}
-
 
9971
 
-
 
9972
	if (!check_digital_port_conflicts(dev)) {
-
 
9973
		DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
-
 
9974
		return ERR_PTR(-EINVAL);
-
 
9975
	}
9854
 
9976
 
9855
	pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
9977
	pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
9856
	if (!pipe_config)
9978
	if (!pipe_config)
9857
		return ERR_PTR(-ENOMEM);
9979
		return ERR_PTR(-ENOMEM);
9858
 
9980
 
9859
	drm_mode_copy(&pipe_config->adjusted_mode, mode);
9981
	drm_mode_copy(&pipe_config->adjusted_mode, mode);
9860
	drm_mode_copy(&pipe_config->requested_mode, mode);
9982
	drm_mode_copy(&pipe_config->requested_mode, mode);
9861
 
9983
 
9862
	pipe_config->cpu_transcoder =
9984
	pipe_config->cpu_transcoder =
9863
		(enum transcoder) to_intel_crtc(crtc)->pipe;
9985
		(enum transcoder) to_intel_crtc(crtc)->pipe;
9864
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;
9986
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;
9865
 
9987
 
9866
	/*
9988
	/*
9867
	 * Sanitize sync polarity flags based on requested ones. If neither
9989
	 * Sanitize sync polarity flags based on requested ones. If neither
9868
	 * positive nor negative polarity is requested, treat this as meaning
9990
	 * positive nor negative polarity is requested, treat this as meaning
9869
	 * negative polarity.
9991
	 * negative polarity.
9870
	 */
9992
	 */
9871
	if (!(pipe_config->adjusted_mode.flags &
9993
	if (!(pipe_config->adjusted_mode.flags &
9872
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
9994
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
9873
		pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
9995
		pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
9874
 
9996
 
9875
	if (!(pipe_config->adjusted_mode.flags &
9997
	if (!(pipe_config->adjusted_mode.flags &
9876
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
9998
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
9877
		pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
9999
		pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
9878
 
10000
 
9879
	/* Compute a starting value for pipe_config->pipe_bpp taking the source
10001
	/* Compute a starting value for pipe_config->pipe_bpp taking the source
9880
	 * plane pixel format and any sink constraints into account. Returns the
10002
	 * plane pixel format and any sink constraints into account. Returns the
9881
	 * source plane bpp so that dithering can be selected on mismatches
10003
	 * source plane bpp so that dithering can be selected on mismatches
9882
	 * after encoders and crtc also have had their say. */
10004
	 * after encoders and crtc also have had their say. */
9883
	plane_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
10005
	plane_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
9884
					      fb, pipe_config);
10006
					      fb, pipe_config);
9885
	if (plane_bpp < 0)
10007
	if (plane_bpp < 0)
9886
		goto fail;
10008
		goto fail;
9887
 
10009
 
9888
	/*
10010
	/*
9889
	 * Determine the real pipe dimensions. Note that stereo modes can
10011
	 * Determine the real pipe dimensions. Note that stereo modes can
9890
	 * increase the actual pipe size due to the frame doubling and
10012
	 * increase the actual pipe size due to the frame doubling and
9891
	 * insertion of additional space for blanks between the frames. This
10013
	 * insertion of additional space for blanks between the frames. This
9892
	 * is stored in the crtc timings. We use the requested mode to do this
10014
	 * is stored in the crtc timings. We use the requested mode to do this
9893
	 * computation to clearly distinguish it from the adjusted mode, which
10015
	 * computation to clearly distinguish it from the adjusted mode, which
9894
	 * can be changed by the connectors in the below retry loop.
10016
	 * can be changed by the connectors in the below retry loop.
9895
	 */
10017
	 */
9896
	drm_mode_set_crtcinfo(&pipe_config->requested_mode, CRTC_STEREO_DOUBLE);
10018
	drm_mode_set_crtcinfo(&pipe_config->requested_mode, CRTC_STEREO_DOUBLE);
9897
	pipe_config->pipe_src_w = pipe_config->requested_mode.crtc_hdisplay;
10019
	pipe_config->pipe_src_w = pipe_config->requested_mode.crtc_hdisplay;
9898
	pipe_config->pipe_src_h = pipe_config->requested_mode.crtc_vdisplay;
10020
	pipe_config->pipe_src_h = pipe_config->requested_mode.crtc_vdisplay;
9899
 
10021
 
9900
encoder_retry:
10022
encoder_retry:
9901
	/* Ensure the port clock defaults are reset when retrying. */
10023
	/* Ensure the port clock defaults are reset when retrying. */
9902
	pipe_config->port_clock = 0;
10024
	pipe_config->port_clock = 0;
9903
	pipe_config->pixel_multiplier = 1;
10025
	pipe_config->pixel_multiplier = 1;
9904
 
10026
 
9905
	/* Fill in default crtc timings, allow encoders to overwrite them. */
10027
	/* Fill in default crtc timings, allow encoders to overwrite them. */
9906
	drm_mode_set_crtcinfo(&pipe_config->adjusted_mode, CRTC_STEREO_DOUBLE);
10028
	drm_mode_set_crtcinfo(&pipe_config->adjusted_mode, CRTC_STEREO_DOUBLE);
9907
 
10029
 
9908
	/* Pass our mode to the connectors and the CRTC to give them a chance to
10030
	/* Pass our mode to the connectors and the CRTC to give them a chance to
9909
	 * adjust it according to limitations or connector properties, and also
10031
	 * adjust it according to limitations or connector properties, and also
9910
	 * a chance to reject the mode entirely.
10032
	 * a chance to reject the mode entirely.
9911
	 */
10033
	 */
9912
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
10034
	for_each_intel_encoder(dev, encoder) {
9913
			    base.head) {
-
 
9914
 
10035
 
9915
		if (&encoder->new_crtc->base != crtc)
10036
		if (&encoder->new_crtc->base != crtc)
9916
			continue;
10037
			continue;
9917
 
10038
 
9918
			if (!(encoder->compute_config(encoder, pipe_config))) {
10039
			if (!(encoder->compute_config(encoder, pipe_config))) {
9919
				DRM_DEBUG_KMS("Encoder config failure\n");
10040
				DRM_DEBUG_KMS("Encoder config failure\n");
9920
				goto fail;
10041
				goto fail;
9921
			}
10042
			}
9922
		}
10043
		}
9923
 
10044
 
9924
	/* Set default port clock if not overwritten by the encoder. Needs to be
10045
	/* Set default port clock if not overwritten by the encoder. Needs to be
9925
	 * done afterwards in case the encoder adjusts the mode. */
10046
	 * done afterwards in case the encoder adjusts the mode. */
9926
	if (!pipe_config->port_clock)
10047
	if (!pipe_config->port_clock)
9927
		pipe_config->port_clock = pipe_config->adjusted_mode.crtc_clock
10048
		pipe_config->port_clock = pipe_config->adjusted_mode.crtc_clock
9928
			* pipe_config->pixel_multiplier;
10049
			* pipe_config->pixel_multiplier;
9929
 
10050
 
9930
	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
10051
	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
9931
	if (ret < 0) {
10052
	if (ret < 0) {
9932
		DRM_DEBUG_KMS("CRTC fixup failed\n");
10053
		DRM_DEBUG_KMS("CRTC fixup failed\n");
9933
		goto fail;
10054
		goto fail;
9934
	}
10055
	}
9935
 
10056
 
9936
	if (ret == RETRY) {
10057
	if (ret == RETRY) {
9937
		if (WARN(!retry, "loop in pipe configuration computation\n")) {
10058
		if (WARN(!retry, "loop in pipe configuration computation\n")) {
9938
			ret = -EINVAL;
10059
			ret = -EINVAL;
9939
			goto fail;
10060
			goto fail;
9940
		}
10061
		}
9941
 
10062
 
9942
		DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
10063
		DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
9943
		retry = false;
10064
		retry = false;
9944
		goto encoder_retry;
10065
		goto encoder_retry;
9945
	}
10066
	}
9946
 
10067
 
9947
	pipe_config->dither = pipe_config->pipe_bpp != plane_bpp;
10068
	pipe_config->dither = pipe_config->pipe_bpp != plane_bpp;
9948
	DRM_DEBUG_KMS("plane bpp: %i, pipe bpp: %i, dithering: %i\n",
10069
	DRM_DEBUG_KMS("plane bpp: %i, pipe bpp: %i, dithering: %i\n",
9949
		      plane_bpp, pipe_config->pipe_bpp, pipe_config->dither);
10070
		      plane_bpp, pipe_config->pipe_bpp, pipe_config->dither);
9950
 
10071
 
9951
	return pipe_config;
10072
	return pipe_config;
9952
fail:
10073
fail:
9953
	kfree(pipe_config);
10074
	kfree(pipe_config);
9954
	return ERR_PTR(ret);
10075
	return ERR_PTR(ret);
9955
}
10076
}
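/*
 * Illustrative sketch, not part of either driver revision: the CRTC fixup
 * above may ask for exactly one retry (RETRY), after which the port clock
 * and pixel multiplier defaults are reset and the encoders and CRTC are
 * run again; a second RETRY is reported as a configuration loop.  The
 * control flow, with a hypothetical compute callback:
 */
#define EXAMPLE_RETRY 1		/* stand-in for the driver's RETRY value */

static int compute_with_retry_example(int (*compute)(void *cfg), void *cfg)
{
	bool retry = true;
	int ret;

	for (;;) {
		ret = compute(cfg);
		if (ret != EXAMPLE_RETRY)
			return ret;
		if (!retry)
			return -EINVAL;	/* loop in pipe configuration */
		retry = false;
	}
}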
9956
 
10077
 
9957
/* Computes which crtcs are affected and sets the relevant bits in the mask. For
10078
/* Computes which crtcs are affected and sets the relevant bits in the mask. For
9958
 * simplicity we use the crtc's pipe number (because it's easier to obtain). */
10079
 * simplicity we use the crtc's pipe number (because it's easier to obtain). */
9959
static void
10080
static void
9960
intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes,
10081
intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes,
9961
			     unsigned *prepare_pipes, unsigned *disable_pipes)
10082
			     unsigned *prepare_pipes, unsigned *disable_pipes)
9962
{
10083
{
9963
	struct intel_crtc *intel_crtc;
10084
	struct intel_crtc *intel_crtc;
9964
	struct drm_device *dev = crtc->dev;
10085
	struct drm_device *dev = crtc->dev;
9965
	struct intel_encoder *encoder;
10086
	struct intel_encoder *encoder;
9966
	struct intel_connector *connector;
10087
	struct intel_connector *connector;
9967
	struct drm_crtc *tmp_crtc;
10088
	struct drm_crtc *tmp_crtc;
9968
 
10089
 
9969
	*disable_pipes = *modeset_pipes = *prepare_pipes = 0;
10090
	*disable_pipes = *modeset_pipes = *prepare_pipes = 0;
9970
 
10091
 
9971
	/* Check which crtcs have changed outputs connected to them; these need
10092
	/* Check which crtcs have changed outputs connected to them; these need
9972
	 * to be part of the prepare_pipes mask. We don't (yet) support global
10093
	 * to be part of the prepare_pipes mask. We don't (yet) support global
9973
	 * modeset across multiple crtcs, so modeset_pipes will only have one
10094
	 * modeset across multiple crtcs, so modeset_pipes will only have one
9974
	 * bit set at most. */
10095
	 * bit set at most. */
9975
	list_for_each_entry(connector, &dev->mode_config.connector_list,
10096
	list_for_each_entry(connector, &dev->mode_config.connector_list,
9976
			    base.head) {
10097
			    base.head) {
9977
		if (connector->base.encoder == &connector->new_encoder->base)
10098
		if (connector->base.encoder == &connector->new_encoder->base)
9978
			continue;
10099
			continue;
9979
 
10100
 
9980
		if (connector->base.encoder) {
10101
		if (connector->base.encoder) {
9981
			tmp_crtc = connector->base.encoder->crtc;
10102
			tmp_crtc = connector->base.encoder->crtc;
9982
 
10103
 
9983
			*prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
10104
			*prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
9984
		}
10105
		}
9985
 
10106
 
9986
		if (connector->new_encoder)
10107
		if (connector->new_encoder)
9987
			*prepare_pipes |=
10108
			*prepare_pipes |=
9988
				1 << connector->new_encoder->new_crtc->pipe;
10109
				1 << connector->new_encoder->new_crtc->pipe;
9989
	}
10110
	}
9990
 
10111
 
9991
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
-
 
9992
			    base.head) {
10112
	for_each_intel_encoder(dev, encoder) {
9993
		if (encoder->base.crtc == &encoder->new_crtc->base)
10113
		if (encoder->base.crtc == &encoder->new_crtc->base)
9994
			continue;
10114
			continue;
9995
 
10115
 
9996
		if (encoder->base.crtc) {
10116
		if (encoder->base.crtc) {
9997
			tmp_crtc = encoder->base.crtc;
10117
			tmp_crtc = encoder->base.crtc;
9998
 
10118
 
9999
			*prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
10119
			*prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
10000
		}
10120
		}
10001
 
10121
 
10002
		if (encoder->new_crtc)
10122
		if (encoder->new_crtc)
10003
			*prepare_pipes |= 1 << encoder->new_crtc->pipe;
10123
			*prepare_pipes |= 1 << encoder->new_crtc->pipe;
10004
	}
10124
	}
10005
 
10125
 
10006
	/* Check for pipes that will be enabled/disabled ... */
10126
	/* Check for pipes that will be enabled/disabled ... */
10007
	for_each_intel_crtc(dev, intel_crtc) {
10127
	for_each_intel_crtc(dev, intel_crtc) {
10008
		if (intel_crtc->base.enabled == intel_crtc->new_enabled)
10128
		if (intel_crtc->base.enabled == intel_crtc->new_enabled)
10009
			continue;
10129
			continue;
10010
 
10130
 
10011
		if (!intel_crtc->new_enabled)
10131
		if (!intel_crtc->new_enabled)
10012
			*disable_pipes |= 1 << intel_crtc->pipe;
10132
			*disable_pipes |= 1 << intel_crtc->pipe;
10013
		else
10133
		else
10014
			*prepare_pipes |= 1 << intel_crtc->pipe;
10134
			*prepare_pipes |= 1 << intel_crtc->pipe;
10015
	}
10135
	}
10016
 
10136
 
10017
 
10137
 
10018
	/* set_mode is also used to update properties on live display pipes. */
10138
	/* set_mode is also used to update properties on live display pipes. */
10019
	intel_crtc = to_intel_crtc(crtc);
10139
	intel_crtc = to_intel_crtc(crtc);
10020
	if (intel_crtc->new_enabled)
10140
	if (intel_crtc->new_enabled)
10021
		*prepare_pipes |= 1 << intel_crtc->pipe;
10141
		*prepare_pipes |= 1 << intel_crtc->pipe;
10022
 
10142
 
10023
	/*
10143
	/*
10024
	 * For simplicity do a full modeset on any pipe where the output routing
10144
	 * For simplicity do a full modeset on any pipe where the output routing
10025
	 * changed. We could be more clever, but that would require us to be
10145
	 * changed. We could be more clever, but that would require us to be
10026
	 * more careful with calling the relevant encoder->mode_set functions.
10146
	 * more careful with calling the relevant encoder->mode_set functions.
10027
	 */
10147
	 */
10028
	if (*prepare_pipes)
10148
	if (*prepare_pipes)
10029
		*modeset_pipes = *prepare_pipes;
10149
		*modeset_pipes = *prepare_pipes;
10030
 
10150
 
10031
	/* ... and mask these out. */
10151
	/* ... and mask these out. */
10032
	*modeset_pipes &= ~(*disable_pipes);
10152
	*modeset_pipes &= ~(*disable_pipes);
10033
	*prepare_pipes &= ~(*disable_pipes);
10153
	*prepare_pipes &= ~(*disable_pipes);
10034
 
10154
 
10035
	/*
10155
	/*
10036
	 * HACK: We don't (yet) fully support global modesets. intel_set_config
10156
	 * HACK: We don't (yet) fully support global modesets. intel_set_config
10037
	 * obeys this rule, but the modeset restore mode of
10157
	 * obeys this rule, but the modeset restore mode of
10038
	 * intel_modeset_setup_hw_state does not.
10158
	 * intel_modeset_setup_hw_state does not.
10039
	 */
10159
	 */
10040
	*modeset_pipes &= 1 << intel_crtc->pipe;
10160
	*modeset_pipes &= 1 << intel_crtc->pipe;
10041
	*prepare_pipes &= 1 << intel_crtc->pipe;
10161
	*prepare_pipes &= 1 << intel_crtc->pipe;
10042
 
10162
 
10043
	DRM_DEBUG_KMS("set mode pipe masks: modeset: %x, prepare: %x, disable: %x\n",
10163
	DRM_DEBUG_KMS("set mode pipe masks: modeset: %x, prepare: %x, disable: %x\n",
10044
		      *modeset_pipes, *prepare_pipes, *disable_pipes);
10164
		      *modeset_pipes, *prepare_pipes, *disable_pipes);
10045
}
10165
}
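/*
 * Illustrative sketch, not part of either driver revision: the three
 * outputs are per-pipe bitmasks.  Roughly, every pipe whose output routing
 * changes lands in prepare_pipes, pipes being switched off land in
 * disable_pipes, modeset_pipes mirrors prepare_pipes, and both are then
 * masked down to the pipe of the crtc being set:
 *
 *	prepare = changed_pipes;
 *	disable = turned_off_pipes;
 *	modeset = prepare & ~disable;
 *	prepare &= ~disable;
 *	modeset &= 1 << this_pipe;
 *	prepare &= 1 << this_pipe;
 */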
10046
 
10166
 
10047
static bool intel_crtc_in_use(struct drm_crtc *crtc)
10167
static bool intel_crtc_in_use(struct drm_crtc *crtc)
10048
{
10168
{
10049
	struct drm_encoder *encoder;
10169
	struct drm_encoder *encoder;
10050
	struct drm_device *dev = crtc->dev;
10170
	struct drm_device *dev = crtc->dev;
10051
 
10171
 
10052
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
10172
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
10053
		if (encoder->crtc == crtc)
10173
		if (encoder->crtc == crtc)
10054
			return true;
10174
			return true;
10055
 
10175
 
10056
	return false;
10176
	return false;
10057
}
10177
}
10058
 
10178
 
10059
static void
10179
static void
10060
intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
10180
intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
10061
{
10181
{
-
 
10182
	struct drm_i915_private *dev_priv = dev->dev_private;
10062
	struct intel_encoder *intel_encoder;
10183
	struct intel_encoder *intel_encoder;
10063
	struct intel_crtc *intel_crtc;
10184
	struct intel_crtc *intel_crtc;
10064
	struct drm_connector *connector;
10185
	struct drm_connector *connector;
10065
 
10186
 
-
 
10187
	intel_shared_dpll_commit(dev_priv);
10066
	list_for_each_entry(intel_encoder, &dev->mode_config.encoder_list,
10188
 
10067
			    base.head) {
10189
	for_each_intel_encoder(dev, intel_encoder) {
10068
		if (!intel_encoder->base.crtc)
10190
		if (!intel_encoder->base.crtc)
10069
			continue;
10191
			continue;
10070
 
10192
 
10071
		intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
10193
		intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
10072
 
10194
 
10073
		if (prepare_pipes & (1 << intel_crtc->pipe))
10195
		if (prepare_pipes & (1 << intel_crtc->pipe))
10074
			intel_encoder->connectors_active = false;
10196
			intel_encoder->connectors_active = false;
10075
	}
10197
	}
10076
 
10198
 
10077
	intel_modeset_commit_output_state(dev);
10199
	intel_modeset_commit_output_state(dev);
10078
 
10200
 
10079
	/* Double check state. */
10201
	/* Double check state. */
10080
	for_each_intel_crtc(dev, intel_crtc) {
10202
	for_each_intel_crtc(dev, intel_crtc) {
10081
		WARN_ON(intel_crtc->base.enabled != intel_crtc_in_use(&intel_crtc->base));
10203
		WARN_ON(intel_crtc->base.enabled != intel_crtc_in_use(&intel_crtc->base));
10082
		WARN_ON(intel_crtc->new_config &&
10204
		WARN_ON(intel_crtc->new_config &&
10083
			intel_crtc->new_config != &intel_crtc->config);
10205
			intel_crtc->new_config != &intel_crtc->config);
10084
		WARN_ON(intel_crtc->base.enabled != !!intel_crtc->new_config);
10206
		WARN_ON(intel_crtc->base.enabled != !!intel_crtc->new_config);
10085
	}
10207
	}
10086
 
10208
 
10087
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
10209
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
10088
		if (!connector->encoder || !connector->encoder->crtc)
10210
		if (!connector->encoder || !connector->encoder->crtc)
10089
			continue;
10211
			continue;
10090
 
10212
 
10091
		intel_crtc = to_intel_crtc(connector->encoder->crtc);
10213
		intel_crtc = to_intel_crtc(connector->encoder->crtc);
10092
 
10214
 
10093
		if (prepare_pipes & (1 << intel_crtc->pipe)) {
10215
		if (prepare_pipes & (1 << intel_crtc->pipe)) {
10094
			struct drm_property *dpms_property =
10216
			struct drm_property *dpms_property =
10095
				dev->mode_config.dpms_property;
10217
				dev->mode_config.dpms_property;
10096
 
10218
 
10097
			connector->dpms = DRM_MODE_DPMS_ON;
10219
			connector->dpms = DRM_MODE_DPMS_ON;
10098
			drm_object_property_set_value(&connector->base,
10220
			drm_object_property_set_value(&connector->base,
10099
							 dpms_property,
10221
							 dpms_property,
10100
							 DRM_MODE_DPMS_ON);
10222
							 DRM_MODE_DPMS_ON);
10101
 
10223
 
10102
			intel_encoder = to_intel_encoder(connector->encoder);
10224
			intel_encoder = to_intel_encoder(connector->encoder);
10103
			intel_encoder->connectors_active = true;
10225
			intel_encoder->connectors_active = true;
10104
		}
10226
		}
10105
	}
10227
	}
10106
 
10228
 
10107
}
10229
}
10108
 
10230
 
10109
static bool intel_fuzzy_clock_check(int clock1, int clock2)
10231
static bool intel_fuzzy_clock_check(int clock1, int clock2)
10110
{
10232
{
10111
	int diff;
10233
	int diff;
10112
 
10234
 
10113
	if (clock1 == clock2)
10235
	if (clock1 == clock2)
10114
		return true;
10236
		return true;
10115
 
10237
 
10116
	if (!clock1 || !clock2)
10238
	if (!clock1 || !clock2)
10117
		return false;
10239
		return false;
10118
 
10240
 
10119
	diff = abs(clock1 - clock2);
10241
	diff = abs(clock1 - clock2);
10120
 
10242
 
10121
	if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105)
10243
	if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105)
10122
		return true;
10244
		return true;
10123
 
10245
 
10124
	return false;
10246
	return false;
10125
}
10247
}
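
The tolerance above is easier to see in isolation. The following is a minimal user-space sketch of the same comparison (the kHz clock values are made up for the demo); it is an illustration, not driver code.

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

/* Same comparison as intel_fuzzy_clock_check(): accept the pair when the
 * difference stays below roughly 5% of the summed clocks.
 */
static bool fuzzy_clock_check(int clock1, int clock2)
{
	int diff;

	if (clock1 == clock2)
		return true;

	if (!clock1 || !clock2)
		return false;

	diff = abs(clock1 - clock2);

	/* (diff + c1 + c2) * 100 / (c1 + c2) < 105  <=>  diff < 5% of (c1 + c2) */
	return ((diff + clock1 + clock2) * 100) / (clock1 + clock2) < 105;
}

int main(void)
{
	printf("%d\n", fuzzy_clock_check(148500, 148350)); /* ~0.05% of the sum -> 1 */
	printf("%d\n", fuzzy_clock_check(148500, 108000)); /* ~15.8% of the sum -> 0 */
	return 0;
}
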
10126
 
10248
 
10127
#define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
10249
#define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
10128
	list_for_each_entry((intel_crtc), \
10250
	list_for_each_entry((intel_crtc), \
10129
			    &(dev)->mode_config.crtc_list, \
10251
			    &(dev)->mode_config.crtc_list, \
10130
			    base.head) \
10252
			    base.head) \
10131
		if (mask & (1 <<(intel_crtc)->pipe))
10253
		if (mask & (1 <<(intel_crtc)->pipe))
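
The masked-crtc iteration above relies on one bit per pipe in modeset_pipes/prepare_pipes/disable_pipes. Below is a hedged stand-alone illustration of that bitmask convention; the pipe enum values and the helper are invented for the sketch.

#include <stdio.h>

enum pipe { PIPE_A, PIPE_B, PIPE_C, NUM_PIPES };

/* Walk a pipe bitmask the way the masked-iteration macro above does,
 * one bit per pipe (A=0, B=1, C=2).
 */
static void dump_pipes(const char *label, unsigned mask)
{
	int pipe;

	printf("%s:", label);
	for (pipe = 0; pipe < NUM_PIPES; pipe++)
		if (mask & (1u << pipe))
			printf(" %c", 'A' + pipe);
	printf("\n");
}

int main(void)
{
	unsigned modeset_pipes = (1u << PIPE_A) | (1u << PIPE_C);
	unsigned disable_pipes = 1u << PIPE_B;

	dump_pipes("modeset", modeset_pipes);
	dump_pipes("disable", disable_pipes);
	return 0;
}
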
10132
 
10254
 
10133
static bool
10255
static bool
10134
intel_pipe_config_compare(struct drm_device *dev,
10256
intel_pipe_config_compare(struct drm_device *dev,
10135
			  struct intel_crtc_config *current_config,
10257
			  struct intel_crtc_config *current_config,
10136
			  struct intel_crtc_config *pipe_config)
10258
			  struct intel_crtc_config *pipe_config)
10137
{
10259
{
10138
#define PIPE_CONF_CHECK_X(name)	\
10260
#define PIPE_CONF_CHECK_X(name)	\
10139
	if (current_config->name != pipe_config->name) { \
10261
	if (current_config->name != pipe_config->name) { \
10140
		DRM_ERROR("mismatch in " #name " " \
10262
		DRM_ERROR("mismatch in " #name " " \
10141
			  "(expected 0x%08x, found 0x%08x)\n", \
10263
			  "(expected 0x%08x, found 0x%08x)\n", \
10142
			  current_config->name, \
10264
			  current_config->name, \
10143
			  pipe_config->name); \
10265
			  pipe_config->name); \
10144
		return false; \
10266
		return false; \
10145
	}
10267
	}
10146
 
10268
 
10147
#define PIPE_CONF_CHECK_I(name)	\
10269
#define PIPE_CONF_CHECK_I(name)	\
10148
	if (current_config->name != pipe_config->name) { \
10270
	if (current_config->name != pipe_config->name) { \
10149
		DRM_ERROR("mismatch in " #name " " \
10271
		DRM_ERROR("mismatch in " #name " " \
10150
			  "(expected %i, found %i)\n", \
10272
			  "(expected %i, found %i)\n", \
10151
			  current_config->name, \
10273
			  current_config->name, \
10152
			  pipe_config->name); \
10274
			  pipe_config->name); \
10153
		return false; \
10275
		return false; \
10154
	}
10276
	}

/* This is required for BDW+ where there is only one set of registers for
 * switching between high and low RR.
 * This macro can be used whenever a comparison has to be made between one
 * hw state and multiple sw state variables.
 */
#define PIPE_CONF_CHECK_I_ALT(name, alt_name) \
	if ((current_config->name != pipe_config->name) && \
		(current_config->alt_name != pipe_config->name)) { \
			DRM_ERROR("mismatch in " #name " " \
				  "(expected %i or %i, found %i)\n", \
				  current_config->name, \
				  current_config->alt_name, \
				  pipe_config->name); \
			return false; \
	}
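
A small stand-alone model of what PIPE_CONF_CHECK_I_ALT tolerates: the single hardware value may match either of two software copies (for example the normal and the low-refresh-rate M/N values). The struct and numbers below are invented; only the accept-either logic mirrors the macro.

#include <stdio.h>
#include <stdbool.h>

struct fake_config {
	int link_m;	/* primary sw copy */
	int link_m2;	/* second (low refresh rate) sw copy */
};

/* Accept the hw value if it matches either software variant. */
static bool check_alt(const struct fake_config *sw, int hw_link_m)
{
	return hw_link_m == sw->link_m || hw_link_m == sw->link_m2;
}

int main(void)
{
	struct fake_config sw = { .link_m = 123456, .link_m2 = 98765 };

	printf("%d\n", check_alt(&sw, 98765));	/* matches the alt copy -> 1 */
	printf("%d\n", check_alt(&sw, 11111));	/* matches neither      -> 0 */
	return 0;
}
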
10155
 
10293
 
10156
#define PIPE_CONF_CHECK_FLAGS(name, mask)	\
10294
#define PIPE_CONF_CHECK_FLAGS(name, mask)	\
10157
	if ((current_config->name ^ pipe_config->name) & (mask)) { \
10295
	if ((current_config->name ^ pipe_config->name) & (mask)) { \
10158
		DRM_ERROR("mismatch in " #name "(" #mask ") "	   \
10296
		DRM_ERROR("mismatch in " #name "(" #mask ") "	   \
10159
			  "(expected %i, found %i)\n", \
10297
			  "(expected %i, found %i)\n", \
10160
			  current_config->name & (mask), \
10298
			  current_config->name & (mask), \
10161
			  pipe_config->name & (mask)); \
10299
			  pipe_config->name & (mask)); \
10162
		return false; \
10300
		return false; \
10163
	}
10301
	}
10164
 
10302
 
10165
#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \
10303
#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \
10166
	if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
10304
	if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
10167
		DRM_ERROR("mismatch in " #name " " \
10305
		DRM_ERROR("mismatch in " #name " " \
10168
			  "(expected %i, found %i)\n", \
10306
			  "(expected %i, found %i)\n", \
10169
			  current_config->name, \
10307
			  current_config->name, \
10170
			  pipe_config->name); \
10308
			  pipe_config->name); \
10171
		return false; \
10309
		return false; \
10172
	}
10310
	}
10173
 
10311
 
10174
#define PIPE_CONF_QUIRK(quirk)	\
10312
#define PIPE_CONF_QUIRK(quirk)	\
10175
	((current_config->quirks | pipe_config->quirks) & (quirk))
10313
	((current_config->quirks | pipe_config->quirks) & (quirk))
10176
 
10314
 
10177
	PIPE_CONF_CHECK_I(cpu_transcoder);
10315
	PIPE_CONF_CHECK_I(cpu_transcoder);
10178
 
10316
 
10179
	PIPE_CONF_CHECK_I(has_pch_encoder);
10317
	PIPE_CONF_CHECK_I(has_pch_encoder);
10180
	PIPE_CONF_CHECK_I(fdi_lanes);
10318
	PIPE_CONF_CHECK_I(fdi_lanes);
10181
	PIPE_CONF_CHECK_I(fdi_m_n.gmch_m);
10319
	PIPE_CONF_CHECK_I(fdi_m_n.gmch_m);
10182
	PIPE_CONF_CHECK_I(fdi_m_n.gmch_n);
10320
	PIPE_CONF_CHECK_I(fdi_m_n.gmch_n);
10183
	PIPE_CONF_CHECK_I(fdi_m_n.link_m);
10321
	PIPE_CONF_CHECK_I(fdi_m_n.link_m);
10184
	PIPE_CONF_CHECK_I(fdi_m_n.link_n);
10322
	PIPE_CONF_CHECK_I(fdi_m_n.link_n);
10185
	PIPE_CONF_CHECK_I(fdi_m_n.tu);
10323
	PIPE_CONF_CHECK_I(fdi_m_n.tu);
10186
 
10324
 
10187
	PIPE_CONF_CHECK_I(has_dp_encoder);
10325
	PIPE_CONF_CHECK_I(has_dp_encoder);

	if (INTEL_INFO(dev)->gen < 8) {
	PIPE_CONF_CHECK_I(dp_m_n.gmch_m);
	PIPE_CONF_CHECK_I(dp_m_n.gmch_n);
	PIPE_CONF_CHECK_I(dp_m_n.link_m);
	PIPE_CONF_CHECK_I(dp_m_n.link_n);
	PIPE_CONF_CHECK_I(dp_m_n.tu);

		if (current_config->has_drrs) {
			PIPE_CONF_CHECK_I(dp_m2_n2.gmch_m);
			PIPE_CONF_CHECK_I(dp_m2_n2.gmch_n);
			PIPE_CONF_CHECK_I(dp_m2_n2.link_m);
			PIPE_CONF_CHECK_I(dp_m2_n2.link_n);
			PIPE_CONF_CHECK_I(dp_m2_n2.tu);
		}
	} else {
		PIPE_CONF_CHECK_I_ALT(dp_m_n.gmch_m, dp_m2_n2.gmch_m);
		PIPE_CONF_CHECK_I_ALT(dp_m_n.gmch_n, dp_m2_n2.gmch_n);
		PIPE_CONF_CHECK_I_ALT(dp_m_n.link_m, dp_m2_n2.link_m);
		PIPE_CONF_CHECK_I_ALT(dp_m_n.link_n, dp_m2_n2.link_n);
		PIPE_CONF_CHECK_I_ALT(dp_m_n.tu, dp_m2_n2.tu);
	}
10193
 
10348
 
10194
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_hdisplay);
10349
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_hdisplay);
10195
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_htotal);
10350
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_htotal);
10196
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_hblank_start);
10351
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_hblank_start);
10197
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_hblank_end);
10352
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_hblank_end);
10198
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_hsync_start);
10353
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_hsync_start);
10199
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_hsync_end);
10354
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_hsync_end);
10200
 
10355
 
10201
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vdisplay);
10356
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vdisplay);
10202
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vtotal);
10357
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vtotal);
10203
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vblank_start);
10358
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vblank_start);
10204
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vblank_end);
10359
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vblank_end);
10205
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_start);
10360
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_start);
10206
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_end);
10361
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_end);
10207
 
10362
 
10208
		PIPE_CONF_CHECK_I(pixel_multiplier);
10363
		PIPE_CONF_CHECK_I(pixel_multiplier);
10209
	PIPE_CONF_CHECK_I(has_hdmi_sink);
10364
	PIPE_CONF_CHECK_I(has_hdmi_sink);
10210
	if ((INTEL_INFO(dev)->gen < 8 && !IS_HASWELL(dev)) ||
10365
	if ((INTEL_INFO(dev)->gen < 8 && !IS_HASWELL(dev)) ||
10211
	    IS_VALLEYVIEW(dev))
10366
	    IS_VALLEYVIEW(dev))
10212
		PIPE_CONF_CHECK_I(limited_color_range);
10367
		PIPE_CONF_CHECK_I(limited_color_range);
	PIPE_CONF_CHECK_I(has_infoframe);
10213
 
10369
 
10214
	PIPE_CONF_CHECK_I(has_audio);
10370
	PIPE_CONF_CHECK_I(has_audio);
10215
 
10371
 
10216
	PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
10372
	PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
10217
			      DRM_MODE_FLAG_INTERLACE);
10373
			      DRM_MODE_FLAG_INTERLACE);
10218
 
10374
 
10219
	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
10375
	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
10220
		PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
10376
		PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
10221
				      DRM_MODE_FLAG_PHSYNC);
10377
				      DRM_MODE_FLAG_PHSYNC);
10222
		PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
10378
		PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
10223
				      DRM_MODE_FLAG_NHSYNC);
10379
				      DRM_MODE_FLAG_NHSYNC);
10224
		PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
10380
		PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
10225
				      DRM_MODE_FLAG_PVSYNC);
10381
				      DRM_MODE_FLAG_PVSYNC);
10226
		PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
10382
		PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
10227
				      DRM_MODE_FLAG_NVSYNC);
10383
				      DRM_MODE_FLAG_NVSYNC);
10228
	}
10384
	}
10229
 
10385
 
10230
	PIPE_CONF_CHECK_I(pipe_src_w);
10386
	PIPE_CONF_CHECK_I(pipe_src_w);
10231
	PIPE_CONF_CHECK_I(pipe_src_h);
10387
	PIPE_CONF_CHECK_I(pipe_src_h);
10232
 
10388
 
10233
	/*
10389
	/*
10234
	 * FIXME: BIOS likes to set up a cloned config with lvds+external
10390
	 * FIXME: BIOS likes to set up a cloned config with lvds+external
10235
	 * screen. Since we don't yet re-compute the pipe config when moving
10391
	 * screen. Since we don't yet re-compute the pipe config when moving
10236
	 * just the lvds port away to another pipe the sw tracking won't match.
10392
	 * just the lvds port away to another pipe the sw tracking won't match.
10237
	 *
10393
	 *
10238
	 * Proper atomic modesets with recomputed global state will fix this.
10394
	 * Proper atomic modesets with recomputed global state will fix this.
10239
	 * Until then just don't check gmch state for inherited modes.
10395
	 * Until then just don't check gmch state for inherited modes.
10240
	 */
10396
	 */
10241
	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_INHERITED_MODE)) {
10397
	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_INHERITED_MODE)) {
10242
	PIPE_CONF_CHECK_I(gmch_pfit.control);
10398
	PIPE_CONF_CHECK_I(gmch_pfit.control);
10243
	/* pfit ratios are autocomputed by the hw on gen4+ */
10399
	/* pfit ratios are autocomputed by the hw on gen4+ */
10244
	if (INTEL_INFO(dev)->gen < 4)
10400
	if (INTEL_INFO(dev)->gen < 4)
10245
		PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
10401
		PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
10246
	PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
10402
	PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
10247
	}
10403
	}
10248
 
10404
 
10249
	PIPE_CONF_CHECK_I(pch_pfit.enabled);
10405
	PIPE_CONF_CHECK_I(pch_pfit.enabled);
10250
	if (current_config->pch_pfit.enabled) {
10406
	if (current_config->pch_pfit.enabled) {
10251
	PIPE_CONF_CHECK_I(pch_pfit.pos);
10407
	PIPE_CONF_CHECK_I(pch_pfit.pos);
10252
	PIPE_CONF_CHECK_I(pch_pfit.size);
10408
	PIPE_CONF_CHECK_I(pch_pfit.size);
10253
	}
10409
	}
10254
 
10410
 
10255
	/* BDW+ don't expose a synchronous way to read the state */
10411
	/* BDW+ don't expose a synchronous way to read the state */
10256
	if (IS_HASWELL(dev))
10412
	if (IS_HASWELL(dev))
10257
	PIPE_CONF_CHECK_I(ips_enabled);
10413
	PIPE_CONF_CHECK_I(ips_enabled);
10258
 
10414
 
10259
	PIPE_CONF_CHECK_I(double_wide);
10415
	PIPE_CONF_CHECK_I(double_wide);
10260
 
10416
 
10261
	PIPE_CONF_CHECK_X(ddi_pll_sel);
10417
	PIPE_CONF_CHECK_X(ddi_pll_sel);
10262
 
10418
 
10263
	PIPE_CONF_CHECK_I(shared_dpll);
10419
	PIPE_CONF_CHECK_I(shared_dpll);
10264
	PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
10420
	PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
10265
	PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
10421
	PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
10266
	PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
10422
	PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
10267
	PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
10423
	PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
10268
	PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
10424
	PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
	PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
10269
 
10428
 
10270
	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
10429
	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
10271
		PIPE_CONF_CHECK_I(pipe_bpp);
10430
		PIPE_CONF_CHECK_I(pipe_bpp);
10272
 
10431
 
10273
		PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock);
10432
		PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock);
10274
		PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
10433
		PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
10275
 
10434
 
10276
#undef PIPE_CONF_CHECK_X
10435
#undef PIPE_CONF_CHECK_X
10277
#undef PIPE_CONF_CHECK_I
10436
#undef PIPE_CONF_CHECK_I
#undef PIPE_CONF_CHECK_I_ALT
10278
#undef PIPE_CONF_CHECK_FLAGS
10438
#undef PIPE_CONF_CHECK_FLAGS
10279
#undef PIPE_CONF_CHECK_CLOCK_FUZZY
10439
#undef PIPE_CONF_CHECK_CLOCK_FUZZY
10280
#undef PIPE_CONF_QUIRK
10440
#undef PIPE_CONF_QUIRK
10281
 
10441
 
10282
	return true;
10442
	return true;
10283
}
10443
}
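
The PIPE_CONF_CHECK_* helpers above are classic field-compare macros that use # stringification to name the offending field. A self-contained sketch of the pattern follows; the struct, fields and values are made up, and unlike the driver macros the sketch wraps the body in do/while for statement hygiene.

#include <stdio.h>
#include <stdbool.h>

struct cfg {
	int pipe_bpp;
	int port_clock;
};

/* Compare one field of two configs; print the field name via # stringification
 * and bail out of the calling function on a mismatch.
 */
#define CFG_CHECK_I(cur, hw, name) \
	do { \
		if ((cur)->name != (hw)->name) { \
			fprintf(stderr, "mismatch in " #name \
				" (expected %i, found %i)\n", \
				(cur)->name, (hw)->name); \
			return false; \
		} \
	} while (0)

static bool cfg_compare(const struct cfg *cur, const struct cfg *hw)
{
	CFG_CHECK_I(cur, hw, pipe_bpp);
	CFG_CHECK_I(cur, hw, port_clock);
	return true;
}

int main(void)
{
	struct cfg sw = { .pipe_bpp = 24, .port_clock = 270000 };
	struct cfg hw = { .pipe_bpp = 18, .port_clock = 270000 };

	printf("match: %d\n", cfg_compare(&sw, &hw));
	return 0;
}
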

static void check_wm_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct skl_ddb_allocation hw_ddb, *sw_ddb;
	struct intel_crtc *intel_crtc;
	int plane;

	if (INTEL_INFO(dev)->gen < 9)
		return;

	skl_ddb_get_hw_state(dev_priv, &hw_ddb);
	sw_ddb = &dev_priv->wm.skl_hw.ddb;

	for_each_intel_crtc(dev, intel_crtc) {
		struct skl_ddb_entry *hw_entry, *sw_entry;
		const enum pipe pipe = intel_crtc->pipe;

		if (!intel_crtc->active)
			continue;

		/* planes */
		for_each_plane(pipe, plane) {
			hw_entry = &hw_ddb.plane[pipe][plane];
			sw_entry = &sw_ddb->plane[pipe][plane];

			if (skl_ddb_entry_equal(hw_entry, sw_entry))
				continue;

			DRM_ERROR("mismatch in DDB state pipe %c plane %d "
				  "(expected (%u,%u), found (%u,%u))\n",
				  pipe_name(pipe), plane + 1,
				  sw_entry->start, sw_entry->end,
				  hw_entry->start, hw_entry->end);
		}

		/* cursor */
		hw_entry = &hw_ddb.cursor[pipe];
		sw_entry = &sw_ddb->cursor[pipe];

		if (skl_ddb_entry_equal(hw_entry, sw_entry))
			continue;

		DRM_ERROR("mismatch in DDB state pipe %c cursor "
			  "(expected (%u,%u), found (%u,%u))\n",
			  pipe_name(pipe),
			  sw_entry->start, sw_entry->end,
			  hw_entry->start, hw_entry->end);
	}
}
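
The gen9 cross-check above reduces to comparing (start, end) block ranges per plane. Below is a hypothetical stand-alone version of that equality test and the mismatch message; the struct is simplified and the numbers are invented.

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

struct ddb_entry {
	uint16_t start, end;	/* block range; end semantics are assumed here */
};

/* Two DDB allocation entries match only if both bounds match. */
static bool ddb_entry_equal(const struct ddb_entry *a, const struct ddb_entry *b)
{
	return a->start == b->start && a->end == b->end;
}

int main(void)
{
	struct ddb_entry hw = { .start = 0, .end = 160 };
	struct ddb_entry sw = { .start = 0, .end = 144 };

	if (!ddb_entry_equal(&hw, &sw))
		printf("mismatch: expected (%u,%u), found (%u,%u)\n",
		       sw.start, sw.end, hw.start, hw.end);
	return 0;
}
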
10284
 
10494
 
10285
static void
10495
static void
10286
check_connector_state(struct drm_device *dev)
10496
check_connector_state(struct drm_device *dev)
10287
{
10497
{
10288
	struct intel_connector *connector;
10498
	struct intel_connector *connector;
10289
 
10499
 
10290
	list_for_each_entry(connector, &dev->mode_config.connector_list,
10500
	list_for_each_entry(connector, &dev->mode_config.connector_list,
10291
			    base.head) {
10501
			    base.head) {
10292
		/* This also checks the encoder/connector hw state with the
10502
		/* This also checks the encoder/connector hw state with the
10293
		 * ->get_hw_state callbacks. */
10503
		 * ->get_hw_state callbacks. */
10294
		intel_connector_check_state(connector);
10504
		intel_connector_check_state(connector);
10295
 
10505
 
10296
		WARN(&connector->new_encoder->base != connector->base.encoder,
10506
		WARN(&connector->new_encoder->base != connector->base.encoder,
10297
		     "connector's staged encoder doesn't match current encoder\n");
10507
		     "connector's staged encoder doesn't match current encoder\n");
10298
	}
10508
	}
10299
}
10509
}
10300
 
10510
 
10301
static void
10511
static void
10302
check_encoder_state(struct drm_device *dev)
10512
check_encoder_state(struct drm_device *dev)
10303
{
10513
{
10304
	struct intel_encoder *encoder;
10514
	struct intel_encoder *encoder;
10305
	struct intel_connector *connector;
10515
	struct intel_connector *connector;
10306
 
10516
 
10307
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
-
 
10308
			    base.head) {
10517
	for_each_intel_encoder(dev, encoder) {
10309
		bool enabled = false;
10518
		bool enabled = false;
10310
		bool active = false;
10519
		bool active = false;
10311
		enum pipe pipe, tracked_pipe;
10520
		enum pipe pipe, tracked_pipe;
10312
 
10521
 
10313
		DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
10522
		DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
10314
			      encoder->base.base.id,
10523
			      encoder->base.base.id,
10315
			      encoder->base.name);
10524
			      encoder->base.name);
10316
 
10525
 
10317
		WARN(&encoder->new_crtc->base != encoder->base.crtc,
10526
		WARN(&encoder->new_crtc->base != encoder->base.crtc,
10318
		     "encoder's stage crtc doesn't match current crtc\n");
10527
		     "encoder's stage crtc doesn't match current crtc\n");
10319
		WARN(encoder->connectors_active && !encoder->base.crtc,
10528
		WARN(encoder->connectors_active && !encoder->base.crtc,
10320
		     "encoder's active_connectors set, but no crtc\n");
10529
		     "encoder's active_connectors set, but no crtc\n");
10321
 
10530
 
10322
		list_for_each_entry(connector, &dev->mode_config.connector_list,
10531
		list_for_each_entry(connector, &dev->mode_config.connector_list,
10323
				    base.head) {
10532
				    base.head) {
10324
			if (connector->base.encoder != &encoder->base)
10533
			if (connector->base.encoder != &encoder->base)
10325
				continue;
10534
				continue;
10326
			enabled = true;
10535
			enabled = true;
10327
			if (connector->base.dpms != DRM_MODE_DPMS_OFF)
10536
			if (connector->base.dpms != DRM_MODE_DPMS_OFF)
10328
				active = true;
10537
				active = true;
10329
		}
10538
		}
10330
		/*
10539
		/*
10331
		 * for MST connectors, if we unplug, the connector goes
10540
		 * for MST connectors, if we unplug, the connector goes
10332
		 * away but the encoder is still connected to a crtc
10541
		 * away but the encoder is still connected to a crtc
10333
		 * until a modeset happens in response to the hotplug.
10542
		 * until a modeset happens in response to the hotplug.
10334
		 */
10543
		 */
10335
		if (!enabled && encoder->base.encoder_type == DRM_MODE_ENCODER_DPMST)
10544
		if (!enabled && encoder->base.encoder_type == DRM_MODE_ENCODER_DPMST)
10336
			continue;
10545
			continue;
10337
 
10546
 
10338
		WARN(!!encoder->base.crtc != enabled,
10547
		WARN(!!encoder->base.crtc != enabled,
10339
		     "encoder's enabled state mismatch "
10548
		     "encoder's enabled state mismatch "
10340
		     "(expected %i, found %i)\n",
10549
		     "(expected %i, found %i)\n",
10341
		     !!encoder->base.crtc, enabled);
10550
		     !!encoder->base.crtc, enabled);
10342
		WARN(active && !encoder->base.crtc,
10551
		WARN(active && !encoder->base.crtc,
10343
		     "active encoder with no crtc\n");
10552
		     "active encoder with no crtc\n");
10344
 
10553
 
10345
		WARN(encoder->connectors_active != active,
10554
		WARN(encoder->connectors_active != active,
10346
		     "encoder's computed active state doesn't match tracked active state "
10555
		     "encoder's computed active state doesn't match tracked active state "
10347
		     "(expected %i, found %i)\n", active, encoder->connectors_active);
10556
		     "(expected %i, found %i)\n", active, encoder->connectors_active);
10348
 
10557
 
10349
		active = encoder->get_hw_state(encoder, &pipe);
10558
		active = encoder->get_hw_state(encoder, &pipe);
10350
		WARN(active != encoder->connectors_active,
10559
		WARN(active != encoder->connectors_active,
10351
		     "encoder's hw state doesn't match sw tracking "
10560
		     "encoder's hw state doesn't match sw tracking "
10352
		     "(expected %i, found %i)\n",
10561
		     "(expected %i, found %i)\n",
10353
		     encoder->connectors_active, active);
10562
		     encoder->connectors_active, active);
10354
 
10563
 
10355
		if (!encoder->base.crtc)
10564
		if (!encoder->base.crtc)
10356
			continue;
10565
			continue;
10357
 
10566
 
10358
		tracked_pipe = to_intel_crtc(encoder->base.crtc)->pipe;
10567
		tracked_pipe = to_intel_crtc(encoder->base.crtc)->pipe;
10359
		WARN(active && pipe != tracked_pipe,
10568
		WARN(active && pipe != tracked_pipe,
10360
		     "active encoder's pipe doesn't match"
10569
		     "active encoder's pipe doesn't match"
10361
		     "(expected %i, found %i)\n",
10570
		     "(expected %i, found %i)\n",
10362
		     tracked_pipe, pipe);
10571
		     tracked_pipe, pipe);
10363
 
10572
 
10364
	}
10573
	}
10365
}
10574
}
10366
 
10575
 
10367
static void
10576
static void
10368
check_crtc_state(struct drm_device *dev)
10577
check_crtc_state(struct drm_device *dev)
10369
{
10578
{
10370
	struct drm_i915_private *dev_priv = dev->dev_private;
10579
	struct drm_i915_private *dev_priv = dev->dev_private;
10371
	struct intel_crtc *crtc;
10580
	struct intel_crtc *crtc;
10372
	struct intel_encoder *encoder;
10581
	struct intel_encoder *encoder;
10373
	struct intel_crtc_config pipe_config;
10582
	struct intel_crtc_config pipe_config;
10374
 
10583
 
10375
	for_each_intel_crtc(dev, crtc) {
10584
	for_each_intel_crtc(dev, crtc) {
10376
		bool enabled = false;
10585
		bool enabled = false;
10377
		bool active = false;
10586
		bool active = false;
10378
 
10587
 
10379
		memset(&pipe_config, 0, sizeof(pipe_config));
10588
		memset(&pipe_config, 0, sizeof(pipe_config));
10380
 
10589
 
10381
		DRM_DEBUG_KMS("[CRTC:%d]\n",
10590
		DRM_DEBUG_KMS("[CRTC:%d]\n",
10382
			      crtc->base.base.id);
10591
			      crtc->base.base.id);
10383
 
10592
 
10384
		WARN(crtc->active && !crtc->base.enabled,
10593
		WARN(crtc->active && !crtc->base.enabled,
10385
		     "active crtc, but not enabled in sw tracking\n");
10594
		     "active crtc, but not enabled in sw tracking\n");
10386
 
10595
 
10387
		list_for_each_entry(encoder, &dev->mode_config.encoder_list,
-
 
10388
				    base.head) {
10596
		for_each_intel_encoder(dev, encoder) {
10389
			if (encoder->base.crtc != &crtc->base)
10597
			if (encoder->base.crtc != &crtc->base)
10390
				continue;
10598
				continue;
10391
			enabled = true;
10599
			enabled = true;
10392
			if (encoder->connectors_active)
10600
			if (encoder->connectors_active)
10393
				active = true;
10601
				active = true;
10394
		}
10602
		}
10395
 
10603
 
10396
		WARN(active != crtc->active,
10604
		WARN(active != crtc->active,
10397
		     "crtc's computed active state doesn't match tracked active state "
10605
		     "crtc's computed active state doesn't match tracked active state "
10398
		     "(expected %i, found %i)\n", active, crtc->active);
10606
		     "(expected %i, found %i)\n", active, crtc->active);
10399
		WARN(enabled != crtc->base.enabled,
10607
		WARN(enabled != crtc->base.enabled,
10400
		     "crtc's computed enabled state doesn't match tracked enabled state "
10608
		     "crtc's computed enabled state doesn't match tracked enabled state "
10401
		     "(expected %i, found %i)\n", enabled, crtc->base.enabled);
10609
		     "(expected %i, found %i)\n", enabled, crtc->base.enabled);
10402
 
10610
 
10403
		active = dev_priv->display.get_pipe_config(crtc,
10611
		active = dev_priv->display.get_pipe_config(crtc,
10404
							   &pipe_config);
10612
							   &pipe_config);
10405
 
10613
 
10406
		/* hw state is inconsistent with the pipe A quirk */
10614
		/* hw state is inconsistent with the pipe quirk */
-
 
10615
		if ((crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
10407
		if (crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
10616
		    (crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
10408
			active = crtc->active;
10617
			active = crtc->active;
10409
 
-
 
10410
		list_for_each_entry(encoder, &dev->mode_config.encoder_list,
10618
 
10411
				    base.head) {
10619
		for_each_intel_encoder(dev, encoder) {
10412
			enum pipe pipe;
10620
			enum pipe pipe;
10413
			if (encoder->base.crtc != &crtc->base)
10621
			if (encoder->base.crtc != &crtc->base)
10414
				continue;
10622
				continue;
10415
			if (encoder->get_hw_state(encoder, &pipe))
10623
			if (encoder->get_hw_state(encoder, &pipe))
10416
				encoder->get_config(encoder, &pipe_config);
10624
				encoder->get_config(encoder, &pipe_config);
10417
		}
10625
		}
10418
 
10626
 
10419
		WARN(crtc->active != active,
10627
		WARN(crtc->active != active,
10420
		     "crtc active state doesn't match with hw state "
10628
		     "crtc active state doesn't match with hw state "
10421
		     "(expected %i, found %i)\n", crtc->active, active);
10629
		     "(expected %i, found %i)\n", crtc->active, active);
10422
 
10630
 
10423
		if (active &&
10631
		if (active &&
10424
		    !intel_pipe_config_compare(dev, &crtc->config, &pipe_config)) {
10632
		    !intel_pipe_config_compare(dev, &crtc->config, &pipe_config)) {
10425
			WARN(1, "pipe state doesn't match!\n");
10633
			WARN(1, "pipe state doesn't match!\n");
10426
			intel_dump_pipe_config(crtc, &pipe_config,
10634
			intel_dump_pipe_config(crtc, &pipe_config,
10427
					       "[hw state]");
10635
					       "[hw state]");
10428
			intel_dump_pipe_config(crtc, &crtc->config,
10636
			intel_dump_pipe_config(crtc, &crtc->config,
10429
					       "[sw state]");
10637
					       "[sw state]");
10430
		}
10638
		}
10431
	}
10639
	}
10432
}
10640
}
10433
 
10641
 
10434
static void
10642
static void
10435
check_shared_dpll_state(struct drm_device *dev)
10643
check_shared_dpll_state(struct drm_device *dev)
10436
{
10644
{
10437
	struct drm_i915_private *dev_priv = dev->dev_private;
10645
	struct drm_i915_private *dev_priv = dev->dev_private;
10438
	struct intel_crtc *crtc;
10646
	struct intel_crtc *crtc;
10439
	struct intel_dpll_hw_state dpll_hw_state;
10647
	struct intel_dpll_hw_state dpll_hw_state;
10440
	int i;
10648
	int i;
10441
 
10649
 
10442
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
10650
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
10443
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
10651
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
10444
		int enabled_crtcs = 0, active_crtcs = 0;
10652
		int enabled_crtcs = 0, active_crtcs = 0;
10445
		bool active;
10653
		bool active;
10446
 
10654
 
10447
		memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
10655
		memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
10448
 
10656
 
10449
		DRM_DEBUG_KMS("%s\n", pll->name);
10657
		DRM_DEBUG_KMS("%s\n", pll->name);
10450
 
10658
 
10451
		active = pll->get_hw_state(dev_priv, pll, &dpll_hw_state);
10659
		active = pll->get_hw_state(dev_priv, pll, &dpll_hw_state);
10452
 
10660
 
10453
		WARN(pll->active > pll->refcount,
10661
		WARN(pll->active > hweight32(pll->config.crtc_mask),
10454
		     "more active pll users than references: %i vs %i\n",
10662
		     "more active pll users than references: %i vs %i\n",
10455
		     pll->active, pll->refcount);
10663
		     pll->active, hweight32(pll->config.crtc_mask));
10456
		WARN(pll->active && !pll->on,
10664
		WARN(pll->active && !pll->on,
10457
		     "pll in active use but not on in sw tracking\n");
10665
		     "pll in active use but not on in sw tracking\n");
10458
		WARN(pll->on && !pll->active,
10666
		WARN(pll->on && !pll->active,
10459
		     "pll in on but not on in use in sw tracking\n");
10667
		     "pll in on but not on in use in sw tracking\n");
10460
		WARN(pll->on != active,
10668
		WARN(pll->on != active,
10461
		     "pll on state mismatch (expected %i, found %i)\n",
10669
		     "pll on state mismatch (expected %i, found %i)\n",
10462
		     pll->on, active);
10670
		     pll->on, active);
10463
 
10671
 
10464
		for_each_intel_crtc(dev, crtc) {
10672
		for_each_intel_crtc(dev, crtc) {
10465
			if (crtc->base.enabled && intel_crtc_to_shared_dpll(crtc) == pll)
10673
			if (crtc->base.enabled && intel_crtc_to_shared_dpll(crtc) == pll)
10466
				enabled_crtcs++;
10674
				enabled_crtcs++;
10467
			if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
10675
			if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
10468
				active_crtcs++;
10676
				active_crtcs++;
10469
		}
10677
		}
10470
		WARN(pll->active != active_crtcs,
10678
		WARN(pll->active != active_crtcs,
10471
		     "pll active crtcs mismatch (expected %i, found %i)\n",
10679
		     "pll active crtcs mismatch (expected %i, found %i)\n",
10472
		     pll->active, active_crtcs);
10680
		     pll->active, active_crtcs);
10473
		WARN(pll->refcount != enabled_crtcs,
10681
		WARN(hweight32(pll->config.crtc_mask) != enabled_crtcs,
10474
		     "pll enabled crtcs mismatch (expected %i, found %i)\n",
10682
		     "pll enabled crtcs mismatch (expected %i, found %i)\n",
10475
		     pll->refcount, enabled_crtcs);
10683
		     hweight32(pll->config.crtc_mask), enabled_crtcs);
10476
 
10684
 
10477
		WARN(pll->on && memcmp(&pll->hw_state, &dpll_hw_state,
10685
		WARN(pll->on && memcmp(&pll->config.hw_state, &dpll_hw_state,
10478
				       sizeof(dpll_hw_state)),
10686
				       sizeof(dpll_hw_state)),
10479
		     "pll hw state mismatch\n");
10687
		     "pll hw state mismatch\n");
10480
	}
10688
	}
10481
}
10689
}
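
hweight32() used above is simply a 32-bit population count. Here is a minimal user-space stand-in, so the crtc_mask accounting can be followed outside the kernel; the mask value is invented.

#include <stdio.h>
#include <stdint.h>

/* Count set bits, the way hweight32() is used on pll->config.crtc_mask. */
static unsigned int popcount32(uint32_t v)
{
	unsigned int count = 0;

	while (v) {
		v &= v - 1;	/* clear the lowest set bit */
		count++;
	}
	return count;
}

int main(void)
{
	uint32_t crtc_mask = (1u << 0) | (1u << 2);	/* pipes A and C */

	printf("enabled crtcs on this pll: %u\n", popcount32(crtc_mask));
	return 0;
}
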
10482
 
10690
 
10483
void
10691
void
10484
intel_modeset_check_state(struct drm_device *dev)
10692
intel_modeset_check_state(struct drm_device *dev)
10485
{
10693
{
	check_wm_state(dev);
10486
	check_connector_state(dev);
10695
	check_connector_state(dev);
10487
	check_encoder_state(dev);
10696
	check_encoder_state(dev);
10488
	check_crtc_state(dev);
10697
	check_crtc_state(dev);
10489
	check_shared_dpll_state(dev);
10698
	check_shared_dpll_state(dev);
10490
}
10699
}
10491
 
10700
 
10492
void ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config,
10701
void ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config,
10493
				     int dotclock)
10702
				     int dotclock)
10494
{
10703
{
10495
	/*
10704
	/*
10496
	 * FDI already provided one idea for the dotclock.
10705
	 * FDI already provided one idea for the dotclock.
10497
	 * Yell if the encoder disagrees.
10706
	 * Yell if the encoder disagrees.
10498
	 */
10707
	 */
10499
	WARN(!intel_fuzzy_clock_check(pipe_config->adjusted_mode.crtc_clock, dotclock),
10708
	WARN(!intel_fuzzy_clock_check(pipe_config->adjusted_mode.crtc_clock, dotclock),
10500
	     "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
10709
	     "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
10501
	     pipe_config->adjusted_mode.crtc_clock, dotclock);
10710
	     pipe_config->adjusted_mode.crtc_clock, dotclock);
10502
}
10711
}
10503
 
10712
 
10504
static void update_scanline_offset(struct intel_crtc *crtc)
10713
static void update_scanline_offset(struct intel_crtc *crtc)
10505
{
10714
{
10506
	struct drm_device *dev = crtc->base.dev;
10715
	struct drm_device *dev = crtc->base.dev;
10507
 
10716
 
10508
	/*
10717
	/*
10509
	 * The scanline counter increments at the leading edge of hsync.
10718
	 * The scanline counter increments at the leading edge of hsync.
10510
	 *
10719
	 *
10511
	 * On most platforms it starts counting from vtotal-1 on the
10720
	 * On most platforms it starts counting from vtotal-1 on the
10512
	 * first active line. That means the scanline counter value is
10721
	 * first active line. That means the scanline counter value is
10513
	 * always one less than what we would expect. I.e. just after
10722
	 * always one less than what we would expect. I.e. just after
10514
	 * start of vblank, which also occurs at start of hsync (on the
10723
	 * start of vblank, which also occurs at start of hsync (on the
10515
	 * last active line), the scanline counter will read vblank_start-1.
10724
	 * last active line), the scanline counter will read vblank_start-1.
10516
	 *
10725
	 *
10517
	 * On gen2 the scanline counter starts counting from 1 instead
10726
	 * On gen2 the scanline counter starts counting from 1 instead
10518
	 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
10727
	 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
10519
	 * to keep the value positive), instead of adding one.
10728
	 * to keep the value positive), instead of adding one.
10520
	 *
10729
	 *
10521
	 * On HSW+ the behaviour of the scanline counter depends on the output
10730
	 * On HSW+ the behaviour of the scanline counter depends on the output
10522
	 * type. For DP ports it behaves like most other platforms, but on HDMI
10731
	 * type. For DP ports it behaves like most other platforms, but on HDMI
10523
	 * there's an extra 1 line difference. So we need to add two instead of
10732
	 * there's an extra 1 line difference. So we need to add two instead of
10524
	 * one to the value.
10733
	 * one to the value.
10525
	 */
10734
	 */
10526
	if (IS_GEN2(dev)) {
10735
	if (IS_GEN2(dev)) {
10527
		const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
10736
		const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
10528
		int vtotal;
10737
		int vtotal;
10529
 
10738
 
10530
		vtotal = mode->crtc_vtotal;
10739
		vtotal = mode->crtc_vtotal;
10531
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
10740
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
10532
			vtotal /= 2;
10741
			vtotal /= 2;
10533
 
10742
 
10534
		crtc->scanline_offset = vtotal - 1;
10743
		crtc->scanline_offset = vtotal - 1;
10535
	} else if (HAS_DDI(dev) &&
10744
	} else if (HAS_DDI(dev) &&
10536
		   intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI)) {
10745
		   intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
10537
		crtc->scanline_offset = 2;
10746
		crtc->scanline_offset = 2;
10538
	} else
10747
	} else
10539
		crtc->scanline_offset = 1;
10748
		crtc->scanline_offset = 1;
10540
}
10749
}
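
The rule spelled out in the comment above can be restated as a small pure function. This is a hedged sketch only; gen and output type are plain parameters here rather than the driver's device info structures.

#include <stdio.h>
#include <stdbool.h>

/* Scanline offset per the comment above: gen2 counts from vtotal-1,
 * HSW+ (DDI) HDMI is off by one extra line, everything else by one.
 */
static int scanline_offset(int gen, bool has_ddi, bool is_hdmi,
			   int crtc_vtotal, bool interlaced)
{
	if (gen == 2) {
		int vtotal = crtc_vtotal;

		if (interlaced)
			vtotal /= 2;
		return vtotal - 1;
	} else if (has_ddi && is_hdmi) {
		return 2;
	}
	return 1;
}

int main(void)
{
	printf("gen2 progressive, vtotal 525: %d\n",
	       scanline_offset(2, false, false, 525, false));
	printf("DDI HDMI: %d\n", scanline_offset(8, true, true, 1125, false));
	printf("everything else: %d\n", scanline_offset(6, false, false, 1125, false));
	return 0;
}
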

static struct intel_crtc_config *
intel_modeset_compute_config(struct drm_crtc *crtc,
			     struct drm_display_mode *mode,
			     struct drm_framebuffer *fb,
			     unsigned *modeset_pipes,
			     unsigned *prepare_pipes,
			     unsigned *disable_pipes)
{
	struct intel_crtc_config *pipe_config = NULL;

	intel_modeset_affected_pipes(crtc, modeset_pipes,
				     prepare_pipes, disable_pipes);

	if ((*modeset_pipes) == 0)
		goto out;

	/*
	 * Note this needs changes when we start tracking multiple modes
	 * and crtcs.  At that point we'll need to compute the whole config
	 * (i.e. one pipe_config for each crtc) rather than just the one
	 * for this crtc.
	 */
	pipe_config = intel_modeset_pipe_config(crtc, fb, mode);
	if (IS_ERR(pipe_config)) {
		goto out;
	}
	intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
			       "[modeset]");

out:
	return pipe_config;
}
10541
 
10783
 
10542
static int __intel_set_mode(struct drm_crtc *crtc,
10784
static int __intel_set_mode(struct drm_crtc *crtc,
10543
		    struct drm_display_mode *mode,
10785
		    struct drm_display_mode *mode,
-
 
10786
			    int x, int y, struct drm_framebuffer *fb,
-
 
10787
			    struct intel_crtc_config *pipe_config,
-
 
10788
			    unsigned modeset_pipes,
-
 
10789
			    unsigned prepare_pipes,
10544
		    int x, int y, struct drm_framebuffer *fb)
10790
			    unsigned disable_pipes)
10545
{
10791
{
10546
	struct drm_device *dev = crtc->dev;
10792
	struct drm_device *dev = crtc->dev;
10547
	struct drm_i915_private *dev_priv = dev->dev_private;
10793
	struct drm_i915_private *dev_priv = dev->dev_private;
10548
	struct drm_display_mode *saved_mode;
10794
	struct drm_display_mode *saved_mode;
10549
	struct intel_crtc_config *pipe_config = NULL;
-
 
10550
	struct intel_crtc *intel_crtc;
10795
	struct intel_crtc *intel_crtc;
10551
	unsigned disable_pipes, prepare_pipes, modeset_pipes;
-
 
10552
	int ret = 0;
10796
	int ret = 0;
10553
 
10797
 
10554
	saved_mode = kmalloc(sizeof(*saved_mode), GFP_KERNEL);
10798
	saved_mode = kmalloc(sizeof(*saved_mode), GFP_KERNEL);
10555
	if (!saved_mode)
10799
	if (!saved_mode)
10556
		return -ENOMEM;
10800
		return -ENOMEM;
10557
 
-
 
10558
	intel_modeset_affected_pipes(crtc, &modeset_pipes,
-
 
10559
				     &prepare_pipes, &disable_pipes);
-
 
10560
 
10801
 
10561
	*saved_mode = crtc->mode;
-
 
10562
 
-
 
10563
	/* Hack: Because we don't (yet) support global modeset on multiple
-
 
10564
	 * crtcs, we don't keep track of the new mode for more than one crtc.
-
 
10565
	 * Hence simply check whether any bit is set in modeset_pipes in all the
-
 
10566
	 * pieces of code that are not yet converted to deal with mutliple crtcs
10802
	*saved_mode = crtc->mode;
10567
	 * changing their mode at the same time. */
-
 
10568
	if (modeset_pipes) {
-
 
10569
		pipe_config = intel_modeset_pipe_config(crtc, fb, mode);
-
 
10570
		if (IS_ERR(pipe_config)) {
-
 
10571
			ret = PTR_ERR(pipe_config);
-
 
10572
			pipe_config = NULL;
-
 
10573
 
-
 
10574
			goto out;
-
 
10575
		}
-
 
10576
		intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
10803
 
10577
				       "[modeset]");
-
 
10578
		to_intel_crtc(crtc)->new_config = pipe_config;
10804
	if (modeset_pipes)
10579
	}
10805
		to_intel_crtc(crtc)->new_config = pipe_config;
10580
 
10806
 
10581
	/*
10807
	/*
10582
	 * See if the config requires any additional preparation, e.g.
10808
	 * See if the config requires any additional preparation, e.g.
10583
	 * to adjust global state with pipes off.  We need to do this
10809
	 * to adjust global state with pipes off.  We need to do this
10584
	 * here so we can get the modeset_pipe updated config for the new
10810
	 * here so we can get the modeset_pipe updated config for the new
10585
	 * mode set on this crtc.  For other crtcs we need to use the
10811
	 * mode set on this crtc.  For other crtcs we need to use the
10586
	 * adjusted_mode bits in the crtc directly.
10812
	 * adjusted_mode bits in the crtc directly.
10587
	 */
10813
	 */
10588
	if (IS_VALLEYVIEW(dev)) {
10814
	if (IS_VALLEYVIEW(dev)) {
10589
		valleyview_modeset_global_pipes(dev, &prepare_pipes);
10815
		valleyview_modeset_global_pipes(dev, &prepare_pipes);
10590
 
10816
 
10591
		/* may have added more to prepare_pipes than we should */
10817
		/* may have added more to prepare_pipes than we should */
10592
		prepare_pipes &= ~disable_pipes;
10818
		prepare_pipes &= ~disable_pipes;
10593
	}
10819
	}

	if (dev_priv->display.crtc_compute_clock) {
		unsigned clear_pipes = modeset_pipes | disable_pipes;

		ret = intel_shared_dpll_start_config(dev_priv, clear_pipes);
		if (ret)
			goto done;

		for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
			ret = dev_priv->display.crtc_compute_clock(intel_crtc);
			if (ret) {
				intel_shared_dpll_abort_config(dev_priv);
				goto done;
			}
		}
	}
10594
 
10836
 
10595
	for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
10837
	for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
10596
		intel_crtc_disable(&intel_crtc->base);
10838
		intel_crtc_disable(&intel_crtc->base);
10597
 
10839
 
10598
	for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) {
10840
	for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) {
10599
		if (intel_crtc->base.enabled)
10841
		if (intel_crtc->base.enabled)
10600
			dev_priv->display.crtc_disable(&intel_crtc->base);
10842
			dev_priv->display.crtc_disable(&intel_crtc->base);
10601
	}
10843
	}
10602
 
10844
 
10603
	/* crtc->mode is already used by the ->mode_set callbacks, hence we need
10845
	/* crtc->mode is already used by the ->mode_set callbacks, hence we need
10604
	 * to set it here already despite that we pass it down the callchain.
10846
	 * to set it here already despite that we pass it down the callchain.
	 *
	 * Note we'll need to fix this up when we start tracking multiple
	 * pipes; here we assume a single modeset_pipe and only track the
	 * single crtc and mode.
10605
	 */
10851
	 */
10606
	if (modeset_pipes) {
10852
	if (modeset_pipes) {
10607
		crtc->mode = *mode;
10853
		crtc->mode = *mode;
10608
		/* mode_set/enable/disable functions rely on a correct pipe
10854
		/* mode_set/enable/disable functions rely on a correct pipe
10609
		 * config. */
10855
		 * config. */
10610
		to_intel_crtc(crtc)->config = *pipe_config;
10856
		to_intel_crtc(crtc)->config = *pipe_config;
10611
		to_intel_crtc(crtc)->new_config = &to_intel_crtc(crtc)->config;
10857
		to_intel_crtc(crtc)->new_config = &to_intel_crtc(crtc)->config;
10612
 
10858
 
10613
		/*
10859
		/*
10614
		 * Calculate and store various constants which
10860
		 * Calculate and store various constants which
10615
		 * are later needed by vblank and swap-completion
10861
		 * are later needed by vblank and swap-completion
10616
		 * timestamping. They are derived from true hwmode.
10862
		 * timestamping. They are derived from true hwmode.
10617
		 */
10863
		 */
10618
		drm_calc_timestamping_constants(crtc,
10864
		drm_calc_timestamping_constants(crtc,
10619
						&pipe_config->adjusted_mode);
10865
						&pipe_config->adjusted_mode);
10620
	}
10866
	}
10621
 
10867
 
10622
	/* Only after disabling all output pipelines that will be changed can we
10868
	/* Only after disabling all output pipelines that will be changed can we
10623
	 * update the output configuration. */
10869
	 * update the output configuration. */
10624
	intel_modeset_update_state(dev, prepare_pipes);
10870
	intel_modeset_update_state(dev, prepare_pipes);
10625
 
-
 
10626
	if (dev_priv->display.modeset_global_resources)
10871
 
10627
		dev_priv->display.modeset_global_resources(dev);
10872
	modeset_update_crtc_power_domains(dev);
10628
 
10873
 
10629
	/* Set up the DPLL and any encoders state that needs to adjust or depend
10874
	/* Set up the DPLL and any encoders state that needs to adjust or depend
10630
	 * on the DPLL.
10875
	 * on the DPLL.
10631
	 */
10876
	 */
10632
	for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
10877
	for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
10633
		struct drm_framebuffer *old_fb = crtc->primary->fb;
10878
		struct drm_framebuffer *old_fb = crtc->primary->fb;
10634
		struct drm_i915_gem_object *old_obj = intel_fb_obj(old_fb);
10879
		struct drm_i915_gem_object *old_obj = intel_fb_obj(old_fb);
10635
		struct drm_i915_gem_object *obj = intel_fb_obj(fb);
10880
		struct drm_i915_gem_object *obj = intel_fb_obj(fb);
10636
 
10881
 
10637
		mutex_lock(&dev->struct_mutex);
10882
		mutex_lock(&dev->struct_mutex);
10638
		ret = intel_pin_and_fence_fb_obj(dev,
10883
		ret = intel_pin_and_fence_fb_obj(crtc->primary, fb, NULL);
10639
						 obj,
-
 
10640
						 NULL);
-
 
10641
		if (ret != 0) {
10884
		if (ret != 0) {
10642
			DRM_ERROR("pin & fence failed\n");
10885
			DRM_ERROR("pin & fence failed\n");
10643
			mutex_unlock(&dev->struct_mutex);
10886
			mutex_unlock(&dev->struct_mutex);
10644
			goto done;
10887
			goto done;
10645
		}
10888
		}
10646
		if (old_fb)
10889
		if (old_fb)
10647
			intel_unpin_fb_obj(old_obj);
10890
			intel_unpin_fb_obj(old_obj);
10648
		i915_gem_track_fb(old_obj, obj,
10891
		i915_gem_track_fb(old_obj, obj,
10649
				  INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
10892
				  INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
10650
		mutex_unlock(&dev->struct_mutex);
10893
		mutex_unlock(&dev->struct_mutex);
10651
 
10894
 
10652
		crtc->primary->fb = fb;
10895
		crtc->primary->fb = fb;
10653
		crtc->x = x;
10896
		crtc->x = x;
10654
		crtc->y = y;
10897
		crtc->y = y;
10655
 
-
 
10656
		ret = dev_priv->display.crtc_mode_set(&intel_crtc->base,
-
 
10657
					   x, y, fb);
-
 
10658
		if (ret)
-
 
10659
		    goto done;
-
 
10660
	}
10898
	}
10661
 
10899
 
10662
	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
10900
	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
10663
	for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) {
10901
	for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) {
10664
		update_scanline_offset(intel_crtc);
10902
		update_scanline_offset(intel_crtc);
10665
 
10903
 
10666
		dev_priv->display.crtc_enable(&intel_crtc->base);
10904
		dev_priv->display.crtc_enable(&intel_crtc->base);
10667
	}
10905
	}
10668
 
10906
 
10669
	/* FIXME: add subpixel order */
10907
	/* FIXME: add subpixel order */
10670
done:
10908
done:
10671
	if (ret && crtc->enabled)
10909
	if (ret && crtc->enabled)
10672
		crtc->mode = *saved_mode;
10910
		crtc->mode = *saved_mode;
10673
 
-
 
10674
out:
10911
 
10675
	kfree(pipe_config);
10912
	kfree(pipe_config);
10676
	kfree(saved_mode);
10913
	kfree(saved_mode);
10677
	return ret;
10914
	return ret;
10678
}
10915
}
10679
 
10916
 
10680
static int intel_set_mode(struct drm_crtc *crtc,
10917
static int intel_set_mode_pipes(struct drm_crtc *crtc,
10681
		     struct drm_display_mode *mode,
10918
		     struct drm_display_mode *mode,
-
 
10919
				int x, int y, struct drm_framebuffer *fb,
-
 
10920
				struct intel_crtc_config *pipe_config,
-
 
10921
				unsigned modeset_pipes,
-
 
10922
				unsigned prepare_pipes,
10682
		     int x, int y, struct drm_framebuffer *fb)
10923
				unsigned disable_pipes)
10683
{
10924
{
10684
	int ret;
10925
	int ret;
10685
 
10926
 
-
 
10927
	ret = __intel_set_mode(crtc, mode, x, y, fb, pipe_config, modeset_pipes,
10686
	ret = __intel_set_mode(crtc, mode, x, y, fb);
10928
			       prepare_pipes, disable_pipes);
10687
 
10929
 
10688
	if (ret == 0)
10930
	if (ret == 0)
10689
		intel_modeset_check_state(crtc->dev);
10931
		intel_modeset_check_state(crtc->dev);
10690
 
10932
 
10691
	return ret;
10933
	return ret;
10692
}
10934
}

static int intel_set_mode(struct drm_crtc *crtc,
			  struct drm_display_mode *mode,
			  int x, int y, struct drm_framebuffer *fb)
{
	struct intel_crtc_config *pipe_config;
	unsigned modeset_pipes, prepare_pipes, disable_pipes;

	pipe_config = intel_modeset_compute_config(crtc, mode, fb,
						   &modeset_pipes,
						   &prepare_pipes,
						   &disable_pipes);

	if (IS_ERR(pipe_config))
		return PTR_ERR(pipe_config);

	return intel_set_mode_pipes(crtc, mode, x, y, fb, pipe_config,
				    modeset_pipes, prepare_pipes,
				    disable_pipes);
}
10693
 
10955
 
10694
void intel_crtc_restore_mode(struct drm_crtc *crtc)
10956
void intel_crtc_restore_mode(struct drm_crtc *crtc)
10695
{
10957
{
10696
	intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->primary->fb);
10958
	intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->primary->fb);
10697
}
10959
}
10698
 
10960
 
10699
#undef for_each_intel_crtc_masked
10961
#undef for_each_intel_crtc_masked
10700
 
10962
 
10701
static void intel_set_config_free(struct intel_set_config *config)
10963
static void intel_set_config_free(struct intel_set_config *config)
10702
{
10964
{
10703
	if (!config)
10965
	if (!config)
10704
		return;
10966
		return;
10705
 
10967
 
10706
	kfree(config->save_connector_encoders);
10968
	kfree(config->save_connector_encoders);
10707
	kfree(config->save_encoder_crtcs);
10969
	kfree(config->save_encoder_crtcs);
10708
	kfree(config->save_crtc_enabled);
10970
	kfree(config->save_crtc_enabled);
10709
	kfree(config);
10971
	kfree(config);
10710
}
10972
}
10711
 
10973
 
10712
static int intel_set_config_save_state(struct drm_device *dev,
10974
static int intel_set_config_save_state(struct drm_device *dev,
10713
				       struct intel_set_config *config)
10975
				       struct intel_set_config *config)
10714
{
10976
{
10715
	struct drm_crtc *crtc;
10977
	struct drm_crtc *crtc;
10716
	struct drm_encoder *encoder;
10978
	struct drm_encoder *encoder;
10717
	struct drm_connector *connector;
10979
	struct drm_connector *connector;
10718
	int count;
10980
	int count;
10719
 
10981
 
10720
	config->save_crtc_enabled =
10982
	config->save_crtc_enabled =
10721
		kcalloc(dev->mode_config.num_crtc,
10983
		kcalloc(dev->mode_config.num_crtc,
10722
			sizeof(bool), GFP_KERNEL);
10984
			sizeof(bool), GFP_KERNEL);
10723
	if (!config->save_crtc_enabled)
10985
	if (!config->save_crtc_enabled)
10724
		return -ENOMEM;
10986
		return -ENOMEM;
10725
 
10987
 
10726
	config->save_encoder_crtcs =
10988
	config->save_encoder_crtcs =
10727
		kcalloc(dev->mode_config.num_encoder,
10989
		kcalloc(dev->mode_config.num_encoder,
10728
			sizeof(struct drm_crtc *), GFP_KERNEL);
10990
			sizeof(struct drm_crtc *), GFP_KERNEL);
10729
	if (!config->save_encoder_crtcs)
10991
	if (!config->save_encoder_crtcs)
10730
		return -ENOMEM;
10992
		return -ENOMEM;
10731
 
10993
 
10732
	config->save_connector_encoders =
10994
	config->save_connector_encoders =
10733
		kcalloc(dev->mode_config.num_connector,
10995
		kcalloc(dev->mode_config.num_connector,
10734
			sizeof(struct drm_encoder *), GFP_KERNEL);
10996
			sizeof(struct drm_encoder *), GFP_KERNEL);
10735
	if (!config->save_connector_encoders)
10997
	if (!config->save_connector_encoders)
10736
		return -ENOMEM;
10998
		return -ENOMEM;
10737
 
10999
 
10738
	/* Copy data. Note that driver private data is not affected.
11000
	/* Copy data. Note that driver private data is not affected.
10739
	 * Should anything bad happen only the expected state is
11001
	 * Should anything bad happen only the expected state is
10740
	 * restored, not the driver's personal bookkeeping.
11002
	 * restored, not the driver's personal bookkeeping.
10741
	 */
11003
	 */
10742
	count = 0;
11004
	count = 0;
10743
	for_each_crtc(dev, crtc) {
11005
	for_each_crtc(dev, crtc) {
10744
		config->save_crtc_enabled[count++] = crtc->enabled;
11006
		config->save_crtc_enabled[count++] = crtc->enabled;
10745
	}
11007
	}
10746
 
11008
 
10747
	count = 0;
11009
	count = 0;
10748
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
11010
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
10749
		config->save_encoder_crtcs[count++] = encoder->crtc;
11011
		config->save_encoder_crtcs[count++] = encoder->crtc;
10750
	}
11012
	}
10751
 
11013
 
10752
	count = 0;
11014
	count = 0;
10753
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
11015
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
10754
		config->save_connector_encoders[count++] = connector->encoder;
11016
		config->save_connector_encoders[count++] = connector->encoder;
10755
	}
11017
	}
10756
 
11018
 
10757
	return 0;
11019
	return 0;
10758
}
11020
}
10759
 
11021
 
10760
static void intel_set_config_restore_state(struct drm_device *dev,
11022
static void intel_set_config_restore_state(struct drm_device *dev,
10761
					   struct intel_set_config *config)
11023
					   struct intel_set_config *config)
10762
{
11024
{
10763
	struct intel_crtc *crtc;
11025
	struct intel_crtc *crtc;
10764
	struct intel_encoder *encoder;
11026
	struct intel_encoder *encoder;
10765
	struct intel_connector *connector;
11027
	struct intel_connector *connector;
10766
	int count;
11028
	int count;
10767
 
11029
 
10768
	count = 0;
11030
	count = 0;
10769
	for_each_intel_crtc(dev, crtc) {
11031
	for_each_intel_crtc(dev, crtc) {
10770
		crtc->new_enabled = config->save_crtc_enabled[count++];
11032
		crtc->new_enabled = config->save_crtc_enabled[count++];
10771
 
11033
 
10772
		if (crtc->new_enabled)
11034
		if (crtc->new_enabled)
10773
			crtc->new_config = &crtc->config;
11035
			crtc->new_config = &crtc->config;
10774
		else
11036
		else
10775
			crtc->new_config = NULL;
11037
			crtc->new_config = NULL;
10776
	}
11038
	}
10777
 
11039
 
10778
	count = 0;
11040
	count = 0;
10779
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
11041
	for_each_intel_encoder(dev, encoder) {
10780
		encoder->new_crtc =
11042
		encoder->new_crtc =
10781
			to_intel_crtc(config->save_encoder_crtcs[count++]);
11043
			to_intel_crtc(config->save_encoder_crtcs[count++]);
10782
	}
11044
	}
10783
 
11045
 
10784
	count = 0;
11046
	count = 0;
10785
	list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) {
11047
	list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) {
10786
		connector->new_encoder =
11048
		connector->new_encoder =
10787
			to_intel_encoder(config->save_connector_encoders[count++]);
11049
			to_intel_encoder(config->save_connector_encoders[count++]);
10788
	}
11050
	}
10789
}
11051
}
10790
 
11052
 
10791
static bool
is_crtc_connector_off(struct drm_mode_set *set)
{
	int i;

	if (set->num_connectors == 0)
		return false;

	if (WARN_ON(set->connectors == NULL))
		return false;

	for (i = 0; i < set->num_connectors; i++)
		if (set->connectors[i]->encoder &&
		    set->connectors[i]->encoder->crtc == set->crtc &&
		    set->connectors[i]->dpms != DRM_MODE_DPMS_ON)
			return true;

	return false;
}

static void
intel_set_config_compute_mode_changes(struct drm_mode_set *set,
				      struct intel_set_config *config)
{
	/* We should be able to check here if the fb has the same properties
	 * and then just flip_or_move it */
	if (is_crtc_connector_off(set)) {
		config->mode_changed = true;
	} else if (set->crtc->primary->fb != set->fb) {
		/*
		 * If we have no fb, we can only flip as long as the crtc is
		 * active, otherwise we need a full mode set.  The crtc may
		 * be active if we've only disabled the primary plane, or
		 * in fastboot situations.
		 */
		if (set->crtc->primary->fb == NULL) {
			struct intel_crtc *intel_crtc =
				to_intel_crtc(set->crtc);

			if (intel_crtc->active) {
				DRM_DEBUG_KMS("crtc has no fb, will flip\n");
				config->fb_changed = true;
			} else {
				DRM_DEBUG_KMS("inactive crtc, full mode set\n");
				config->mode_changed = true;
			}
		} else if (set->fb == NULL) {
			config->mode_changed = true;
		} else if (set->fb->pixel_format !=
			   set->crtc->primary->fb->pixel_format) {
			config->mode_changed = true;
		} else {
			config->fb_changed = true;
		}
	}

	if (set->fb && (set->x != set->crtc->x || set->y != set->crtc->y))
		config->fb_changed = true;

	if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) {
		DRM_DEBUG_KMS("modes are different, full mode set\n");
		drm_mode_debug_printmodeline(&set->crtc->mode);
		drm_mode_debug_printmodeline(set->mode);
		config->mode_changed = true;
	}

	DRM_DEBUG_KMS("computed changes for [CRTC:%d], mode_changed=%d, fb_changed=%d\n",
		      set->crtc->base.id, config->mode_changed, config->fb_changed);
}

static int
intel_modeset_stage_output_state(struct drm_device *dev,
				 struct drm_mode_set *set,
				 struct intel_set_config *config)
{
	struct intel_connector *connector;
	struct intel_encoder *encoder;
	struct intel_crtc *crtc;
	int ro;

	/* The upper layers ensure that we either disable a crtc or have a list
	 * of connectors. For paranoia, double-check this. */
	WARN_ON(!set->fb && (set->num_connectors != 0));
	WARN_ON(set->fb && (set->num_connectors == 0));

	list_for_each_entry(connector, &dev->mode_config.connector_list,
			    base.head) {
		/* Otherwise traverse passed in connector list and get encoders
		 * for them. */
		for (ro = 0; ro < set->num_connectors; ro++) {
			if (set->connectors[ro] == &connector->base) {
				connector->new_encoder = intel_find_encoder(connector, to_intel_crtc(set->crtc)->pipe);
				break;
			}
		}

		/* If we disable the crtc, disable all its connectors. Also, if
		 * the connector is on the changing crtc but not on the new
		 * connector list, disable it. */
		if ((!set->fb || ro == set->num_connectors) &&
		    connector->base.encoder &&
		    connector->base.encoder->crtc == set->crtc) {
			connector->new_encoder = NULL;

			DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
				connector->base.base.id,
				connector->base.name);
		}

		if (&connector->new_encoder->base != connector->base.encoder) {
			DRM_DEBUG_KMS("encoder changed, full mode switch\n");
			config->mode_changed = true;
		}
	}
	/* connector->new_encoder is now updated for all connectors. */

	/* Update crtc of enabled connectors. */
	list_for_each_entry(connector, &dev->mode_config.connector_list,
			    base.head) {
		struct drm_crtc *new_crtc;

		if (!connector->new_encoder)
			continue;

		new_crtc = connector->new_encoder->base.crtc;

		for (ro = 0; ro < set->num_connectors; ro++) {
			if (set->connectors[ro] == &connector->base)
				new_crtc = set->crtc;
		}

		/* Make sure the new CRTC will work with the encoder */
		if (!drm_encoder_crtc_ok(&connector->new_encoder->base,
					 new_crtc)) {
			return -EINVAL;
		}
		connector->new_encoder->new_crtc = to_intel_crtc(new_crtc);

		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
			connector->base.base.id,
			connector->base.name,
			new_crtc->base.id);
	}

	/* Check for any encoders that need to be disabled. */
	for_each_intel_encoder(dev, encoder) {
		int num_connectors = 0;
		list_for_each_entry(connector,
				    &dev->mode_config.connector_list,
				    base.head) {
			if (connector->new_encoder == encoder) {
				WARN_ON(!connector->new_encoder->new_crtc);
				num_connectors++;
			}
		}

		if (num_connectors == 0)
			encoder->new_crtc = NULL;
		else if (num_connectors > 1)
			return -EINVAL;

		/* Only now check for crtc changes so we don't miss encoders
		 * that will be disabled. */
		if (&encoder->new_crtc->base != encoder->base.crtc) {
			DRM_DEBUG_KMS("crtc changed, full mode switch\n");
			config->mode_changed = true;
		}
	}
	/* Now we've also updated encoder->new_crtc for all encoders. */
	list_for_each_entry(connector, &dev->mode_config.connector_list,
			    base.head) {
		if (connector->new_encoder)
			if (connector->new_encoder != connector->encoder)
				connector->encoder = connector->new_encoder;
	}
	for_each_intel_crtc(dev, crtc) {
		crtc->new_enabled = false;

		for_each_intel_encoder(dev, encoder) {
			if (encoder->new_crtc == crtc) {
				crtc->new_enabled = true;
				break;
			}
		}

		if (crtc->new_enabled != crtc->base.enabled) {
			DRM_DEBUG_KMS("crtc %sabled, full mode switch\n",
				      crtc->new_enabled ? "en" : "dis");
			config->mode_changed = true;
		}

		if (crtc->new_enabled)
			crtc->new_config = &crtc->config;
		else
			crtc->new_config = NULL;
	}

	return 0;
}

static void disable_crtc_nofb(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct intel_encoder *encoder;
	struct intel_connector *connector;

	DRM_DEBUG_KMS("Trying to restore without FB -> disabling pipe %c\n",
		      pipe_name(crtc->pipe));

	list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) {
		if (connector->new_encoder &&
		    connector->new_encoder->new_crtc == crtc)
			connector->new_encoder = NULL;
	}

	for_each_intel_encoder(dev, encoder) {
		if (encoder->new_crtc == crtc)
			encoder->new_crtc = NULL;
	}

	crtc->new_enabled = false;
	crtc->new_config = NULL;
}

static int intel_crtc_set_config(struct drm_mode_set *set)
{
	struct drm_device *dev;
	struct drm_mode_set save_set;
	struct intel_set_config *config;
	struct intel_crtc_config *pipe_config;
	unsigned modeset_pipes, prepare_pipes, disable_pipes;
	int ret;

	BUG_ON(!set);
	BUG_ON(!set->crtc);
	BUG_ON(!set->crtc->helper_private);

	/* Enforce sane interface api - has been abused by the fb helper. */
	BUG_ON(!set->mode && set->fb);
	BUG_ON(set->fb && set->num_connectors == 0);

	if (set->fb) {
		DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
				set->crtc->base.id, set->fb->base.id,
				(int)set->num_connectors, set->x, set->y);
	} else {
		DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
	}

	dev = set->crtc->dev;

	ret = -ENOMEM;
	config = kzalloc(sizeof(*config), GFP_KERNEL);
	if (!config)
		goto out_config;

	ret = intel_set_config_save_state(dev, config);
	if (ret)
		goto out_config;

	save_set.crtc = set->crtc;
	save_set.mode = &set->crtc->mode;
	save_set.x = set->crtc->x;
	save_set.y = set->crtc->y;
	save_set.fb = set->crtc->primary->fb;

	/* Compute whether we need a full modeset, only an fb base update or no
	 * change at all. In the future we might also check whether only the
	 * mode changed, e.g. for LVDS where we only change the panel fitter in
	 * such cases. */
	intel_set_config_compute_mode_changes(set, config);

	ret = intel_modeset_stage_output_state(dev, set, config);
	if (ret)
		goto fail;

	pipe_config = intel_modeset_compute_config(set->crtc, set->mode,
						   set->fb,
						   &modeset_pipes,
						   &prepare_pipes,
						   &disable_pipes);
	if (IS_ERR(pipe_config)) {
		ret = PTR_ERR(pipe_config);
		goto fail;
	} else if (pipe_config) {
		if (pipe_config->has_audio !=
		    to_intel_crtc(set->crtc)->config.has_audio)
			config->mode_changed = true;

		/*
		 * Note we have an issue here with infoframes: current code
		 * only updates them on the full mode set path per hw
		 * requirements.  So here we should be checking for any
		 * required changes and forcing a mode set.
		 */
	}

	/* set_mode will free it in the mode_changed case */
	if (!config->mode_changed)
		kfree(pipe_config);

	intel_update_pipe_size(to_intel_crtc(set->crtc));

	if (config->mode_changed) {
		ret = intel_set_mode_pipes(set->crtc, set->mode,
					   set->x, set->y, set->fb, pipe_config,
					   modeset_pipes, prepare_pipes,
					   disable_pipes);
	} else if (config->fb_changed) {
		struct intel_crtc *intel_crtc = to_intel_crtc(set->crtc);

//       intel_crtc_wait_for_pending_flips(set->crtc);

		ret = intel_pipe_set_base(set->crtc,
					  set->x, set->y, set->fb);

		/*
		 * We need to make sure the primary plane is re-enabled if it
		 * has previously been turned off.
		 */
		if (!intel_crtc->primary_enabled && ret == 0) {
			WARN_ON(!intel_crtc->active);
			intel_enable_primary_hw_plane(set->crtc->primary, set->crtc);
		}

		/*
		 * In the fastboot case this may be our only check of the
		 * state after boot.  It would be better to only do it on
		 * the first update, but we don't have a nice way of doing that
		 * (and really, set_config isn't used much for high freq page
		 * flipping, so increasing its cost here shouldn't be a big
		 * deal).
		 */
		if (i915.fastboot && ret == 0)
			intel_modeset_check_state(set->crtc->dev);
	}

	if (ret) {
		DRM_DEBUG_KMS("failed to set mode on [CRTC:%d], err = %d\n",
			      set->crtc->base.id, ret);
fail:
		intel_set_config_restore_state(dev, config);

		/*
		 * HACK: if the pipe was on, but we didn't have a framebuffer,
		 * force the pipe off to avoid oopsing in the modeset code
		 * due to fb==NULL. This should only happen during boot since
		 * we don't yet reconstruct the FB from the hardware state.
		 */
		if (to_intel_crtc(save_set.crtc)->new_enabled && !save_set.fb)
			disable_crtc_nofb(to_intel_crtc(save_set.crtc));

		/* Try to restore the config */
		if (config->mode_changed &&
		    intel_set_mode(save_set.crtc, save_set.mode,
				   save_set.x, save_set.y, save_set.fb))
			DRM_ERROR("failed to restore config after modeset failure\n");
	}

out_config:
	intel_set_config_free(config);
	return ret;
}

static const struct drm_crtc_funcs intel_crtc_funcs = {
	.gamma_set = intel_crtc_gamma_set,
	.set_config = intel_crtc_set_config,
	.destroy = intel_crtc_destroy,
//	.page_flip = intel_crtc_page_flip,
};

static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
				      struct intel_shared_dpll *pll,
				      struct intel_dpll_hw_state *hw_state)
{
	uint32_t val;

	if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
		return false;

	val = I915_READ(PCH_DPLL(pll->id));
	hw_state->dpll = val;
	hw_state->fp0 = I915_READ(PCH_FP0(pll->id));
	hw_state->fp1 = I915_READ(PCH_FP1(pll->id));

	return val & DPLL_VCO_ENABLE;
}

static void ibx_pch_dpll_mode_set(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
	I915_WRITE(PCH_FP0(pll->id), pll->config.hw_state.fp0);
	I915_WRITE(PCH_FP1(pll->id), pll->config.hw_state.fp1);
}

static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	/* PCH refclock must be enabled first */
	ibx_assert_pch_refclk_enabled(dev_priv);

	I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(PCH_DPLL(pll->id));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll);
	POSTING_READ(PCH_DPLL(pll->id));
	udelay(200);
}

static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_crtc *crtc;

	/* Make sure no transcoder is still depending on us. */
	for_each_intel_crtc(dev, crtc) {
		if (intel_crtc_to_shared_dpll(crtc) == pll)
			assert_pch_transcoder_disabled(dev_priv, crtc->pipe);
	}

	I915_WRITE(PCH_DPLL(pll->id), 0);
	POSTING_READ(PCH_DPLL(pll->id));
	udelay(200);
}

static char *ibx_pch_dpll_names[] = {
	"PCH DPLL A",
	"PCH DPLL B",
};

static void ibx_pch_dpll_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	dev_priv->num_shared_dpll = 2;

	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		dev_priv->shared_dplls[i].id = i;
		dev_priv->shared_dplls[i].name = ibx_pch_dpll_names[i];
		dev_priv->shared_dplls[i].mode_set = ibx_pch_dpll_mode_set;
		dev_priv->shared_dplls[i].enable = ibx_pch_dpll_enable;
		dev_priv->shared_dplls[i].disable = ibx_pch_dpll_disable;
		dev_priv->shared_dplls[i].get_hw_state =
			ibx_pch_dpll_get_hw_state;
	}
}

static void intel_shared_dpll_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_DDI(dev))
		intel_ddi_pll_init(dev);
	else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
		ibx_pch_dpll_init(dev);
	else
		dev_priv->num_shared_dpll = 0;

	BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
}

static int
intel_primary_plane_disable(struct drm_plane *plane)
{
	struct drm_device *dev = plane->dev;
	struct intel_crtc *intel_crtc;

	if (!plane->fb)
		return 0;

	BUG_ON(!plane->crtc);

	intel_crtc = to_intel_crtc(plane->crtc);

	/*
	 * Even though we checked plane->fb above, it's still possible that
	 * the primary plane has been implicitly disabled because the crtc
	 * coordinates given weren't visible, or because we detected
	 * that it was 100% covered by a sprite plane.  Or, the CRTC may be
	 * off and we've set a fb, but haven't actually turned on the CRTC yet.
	 * In either case, we need to unpin the FB and let the fb pointer get
	 * updated, but otherwise we don't need to touch the hardware.
	 */
	if (!intel_crtc->primary_enabled)
		goto disable_unpin;

//   intel_crtc_wait_for_pending_flips(plane->crtc);
	intel_disable_primary_hw_plane(plane, plane->crtc);

disable_unpin:
	mutex_lock(&dev->struct_mutex);
	i915_gem_track_fb(intel_fb_obj(plane->fb), NULL,
			  INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
	intel_unpin_fb_obj(intel_fb_obj(plane->fb));
	mutex_unlock(&dev->struct_mutex);
	plane->fb = NULL;

	return 0;
}

static int
intel_check_primary_plane(struct drm_plane *plane,
			  struct intel_plane_state *state)
{
	struct drm_crtc *crtc = state->crtc;
	struct drm_framebuffer *fb = state->fb;
	struct drm_rect *dest = &state->dst;
	struct drm_rect *src = &state->src;
	const struct drm_rect *clip = &state->clip;

	return drm_plane_helper_check_update(plane, crtc, fb,
					     src, dest, clip,
					     DRM_PLANE_HELPER_NO_SCALING,
					     DRM_PLANE_HELPER_NO_SCALING,
					     false, true, &state->visible);
}

static int
intel_prepare_primary_plane(struct drm_plane *plane,
			    struct intel_plane_state *state)
{
	struct drm_crtc *crtc = state->crtc;
	struct drm_framebuffer *fb = state->fb;
	struct drm_device *dev = crtc->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
	int ret;

	if (old_obj != obj) {
		mutex_lock(&dev->struct_mutex);
		ret = intel_pin_and_fence_fb_obj(plane, fb, NULL);
		if (ret == 0)
			i915_gem_track_fb(old_obj, obj,
					  INTEL_FRONTBUFFER_PRIMARY(pipe));
		mutex_unlock(&dev->struct_mutex);
		if (ret != 0) {
			DRM_DEBUG_KMS("pin & fence failed\n");
			return ret;
		}
	}

	return 0;
}

static void
intel_commit_primary_plane(struct drm_plane *plane,
			   struct intel_plane_state *state)
{
	struct drm_crtc *crtc = state->crtc;
	struct drm_framebuffer *fb = state->fb;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	struct drm_framebuffer *old_fb = plane->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
	struct intel_plane *intel_plane = to_intel_plane(plane);
	struct drm_rect *src = &state->src;

	crtc->primary->fb = fb;
	crtc->x = src->x1 >> 16;
	crtc->y = src->y1 >> 16;

	intel_plane->crtc_x = state->orig_dst.x1;
	intel_plane->crtc_y = state->orig_dst.y1;
	intel_plane->crtc_w = drm_rect_width(&state->orig_dst);
	intel_plane->crtc_h = drm_rect_height(&state->orig_dst);
	intel_plane->src_x = state->orig_src.x1;
	intel_plane->src_y = state->orig_src.y1;
	intel_plane->src_w = drm_rect_width(&state->orig_src);
	intel_plane->src_h = drm_rect_height(&state->orig_src);
	intel_plane->obj = obj;

	if (intel_crtc->active) {
		/*
		 * FBC does not work on some platforms for rotated
		 * planes, so disable it when rotation is not 0 and
		 * update it when rotation is set back to 0.
		 *
		 * FIXME: This is redundant with the fbc update done in
		 * the primary plane enable function except that that
		 * one is done too late. We eventually need to unify
		 * this.
		 */
		if (intel_crtc->primary_enabled &&
		    INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
		    dev_priv->fbc.plane == intel_crtc->plane &&
		    intel_plane->rotation != BIT(DRM_ROTATE_0)) {
			intel_disable_fbc(dev);
		}

		if (state->visible) {
			bool was_enabled = intel_crtc->primary_enabled;

			/* FIXME: kill this fastboot hack */
			intel_update_pipe_size(intel_crtc);

			intel_crtc->primary_enabled = true;

			dev_priv->display.update_primary_plane(crtc, plane->fb,
					crtc->x, crtc->y);

			/*
			 * BDW signals flip done immediately if the plane
			 * is disabled, even if the plane enable is already
			 * armed to occur at the next vblank :(
			 */
			if (IS_BROADWELL(dev) && !was_enabled)
				intel_wait_for_vblank(dev, intel_crtc->pipe);
		} else {
			/*
			 * If clipping results in a non-visible primary plane,
			 * we'll disable the primary plane.  Note that this is
			 * a bit different than what happens if userspace
			 * explicitly disables the plane by passing fb=0
			 * because plane->fb still gets set and pinned.
			 */
			intel_disable_primary_hw_plane(plane, crtc);
		}

		intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));

		mutex_lock(&dev->struct_mutex);
		intel_update_fbc(dev);
		mutex_unlock(&dev->struct_mutex);
	}

	if (old_fb && old_fb != fb) {
		if (intel_crtc->active)
			intel_wait_for_vblank(dev, intel_crtc->pipe);

		mutex_lock(&dev->struct_mutex);
		intel_unpin_fb_obj(old_obj);
		mutex_unlock(&dev->struct_mutex);
	}
}

static int
intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
			     struct drm_framebuffer *fb, int crtc_x, int crtc_y,
			     unsigned int crtc_w, unsigned int crtc_h,
			     uint32_t src_x, uint32_t src_y,
			     uint32_t src_w, uint32_t src_h)
{
	struct intel_plane_state state;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int ret;

	state.crtc = crtc;
	state.fb = fb;

	/* sample coordinates in 16.16 fixed point */
	state.src.x1 = src_x;
	state.src.x2 = src_x + src_w;
	state.src.y1 = src_y;
	state.src.y2 = src_y + src_h;

	/* integer pixels */
	state.dst.x1 = crtc_x;
	state.dst.x2 = crtc_x + crtc_w;
	state.dst.y1 = crtc_y;
	state.dst.y2 = crtc_y + crtc_h;

	state.clip.x1 = 0;
	state.clip.y1 = 0;
	state.clip.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0;
	state.clip.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0;

	state.orig_src = state.src;
	state.orig_dst = state.dst;

	ret = intel_check_primary_plane(plane, &state);
	if (ret)
		return ret;

	ret = intel_prepare_primary_plane(plane, &state);
	if (ret)
		return ret;

	intel_commit_primary_plane(plane, &state);

	return 0;
}

/* Common destruction function for both primary and cursor planes */
static void intel_plane_destroy(struct drm_plane *plane)
{
	struct intel_plane *intel_plane = to_intel_plane(plane);
	drm_plane_cleanup(plane);
	kfree(intel_plane);
}

static const struct drm_plane_funcs intel_primary_plane_funcs = {
	.update_plane = intel_primary_plane_setplane,
	.disable_plane = intel_primary_plane_disable,
	.destroy = intel_plane_destroy,
	.set_property = intel_plane_set_property
};

static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
						    int pipe)
{
	struct intel_plane *primary;
	const uint32_t *intel_primary_formats;
	int num_formats;

	primary = kzalloc(sizeof(*primary), GFP_KERNEL);
	if (primary == NULL)
		return NULL;

	primary->can_scale = false;
	primary->max_downscale = 1;
	primary->pipe = pipe;
	primary->plane = pipe;
	primary->rotation = BIT(DRM_ROTATE_0);
	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
		primary->plane = !pipe;

	if (INTEL_INFO(dev)->gen <= 3) {
		intel_primary_formats = intel_primary_formats_gen2;
		num_formats = ARRAY_SIZE(intel_primary_formats_gen2);
	} else {
		intel_primary_formats = intel_primary_formats_gen4;
		num_formats = ARRAY_SIZE(intel_primary_formats_gen4);
	}

	drm_universal_plane_init(dev, &primary->base, 0,
				 &intel_primary_plane_funcs,
				 intel_primary_formats, num_formats,
				 DRM_PLANE_TYPE_PRIMARY);

	if (INTEL_INFO(dev)->gen >= 4) {
		if (!dev->mode_config.rotation_property)
			dev->mode_config.rotation_property =
				drm_mode_create_rotation_property(dev,
							BIT(DRM_ROTATE_0) |
							BIT(DRM_ROTATE_180));
		if (dev->mode_config.rotation_property)
			drm_object_attach_property(&primary->base.base,
				dev->mode_config.rotation_property,
				primary->rotation);
	}

	return &primary->base;
}

static int
intel_cursor_plane_disable(struct drm_plane *plane)
{
	if (!plane->fb)
		return 0;

	BUG_ON(!plane->crtc);

	return intel_crtc_cursor_set_obj(plane->crtc, NULL, 0, 0);
}

static int
intel_check_cursor_plane(struct drm_plane *plane,
			 struct intel_plane_state *state)
{
	struct drm_crtc *crtc = state->crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_framebuffer *fb = state->fb;
	struct drm_rect *dest = &state->dst;
	struct drm_rect *src = &state->src;
	const struct drm_rect *clip = &state->clip;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	int crtc_w, crtc_h;
	unsigned stride;
	int ret;

	ret = drm_plane_helper_check_update(plane, crtc, fb,
					    src, dest, clip,
					    DRM_PLANE_HELPER_NO_SCALING,
					    DRM_PLANE_HELPER_NO_SCALING,
					    true, true, &state->visible);
	if (ret)
		return ret;

	/* if we want to turn off the cursor ignore width and height */
	if (!obj)
		return 0;

	/* Check for which cursor types we support */
	crtc_w = drm_rect_width(&state->orig_dst);
	crtc_h = drm_rect_height(&state->orig_dst);
	if (!cursor_size_ok(dev, crtc_w, crtc_h)) {
		DRM_DEBUG("Cursor dimension not supported\n");
		return -EINVAL;
	}

	stride = roundup_pow_of_two(crtc_w) * 4;
	if (obj->base.size < stride * crtc_h) {
		DRM_DEBUG_KMS("buffer is too small\n");
		return -ENOMEM;
	}

	if (fb == crtc->cursor->fb)
		return 0;

	/* we only need to pin inside GTT if cursor is non-phy */
	mutex_lock(&dev->struct_mutex);
	if (!INTEL_INFO(dev)->cursor_needs_physical && obj->tiling_mode) {
		DRM_DEBUG_KMS("cursor cannot be tiled\n");
		ret = -EINVAL;
	}
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

static int
intel_commit_cursor_plane(struct drm_plane *plane,
			  struct intel_plane_state *state)
{
	struct drm_crtc *crtc = state->crtc;
	struct drm_framebuffer *fb = state->fb;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_plane *intel_plane = to_intel_plane(plane);
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	int crtc_w, crtc_h;

	crtc->cursor_x = state->orig_dst.x1;
	crtc->cursor_y = state->orig_dst.y1;

	intel_plane->crtc_x = state->orig_dst.x1;
	intel_plane->crtc_y = state->orig_dst.y1;
	intel_plane->crtc_w = drm_rect_width(&state->orig_dst);
	intel_plane->crtc_h = drm_rect_height(&state->orig_dst);
	intel_plane->src_x = state->orig_src.x1;
	intel_plane->src_y = state->orig_src.y1;
	intel_plane->src_w = drm_rect_width(&state->orig_src);
	intel_plane->src_h = drm_rect_height(&state->orig_src);
	intel_plane->obj = obj;

	if (fb != crtc->cursor->fb) {
		crtc_w = drm_rect_width(&state->orig_dst);
		crtc_h = drm_rect_height(&state->orig_dst);
		return intel_crtc_cursor_set_obj(crtc, obj, crtc_w, crtc_h);
	} else {
		intel_crtc_update_cursor(crtc, state->visible);

		return 0;
	}
}

static int
intel_cursor_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
			  struct drm_framebuffer *fb, int crtc_x, int crtc_y,
			  unsigned int crtc_w, unsigned int crtc_h,
			  uint32_t src_x, uint32_t src_y,
			  uint32_t src_w, uint32_t src_h)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_plane_state state;
	int ret;

	state.crtc = crtc;
	state.fb = fb;

	/* sample coordinates in 16.16 fixed point */
	state.src.x1 = src_x;
	state.src.x2 = src_x + src_w;
	state.src.y1 = src_y;
	state.src.y2 = src_y + src_h;

	/* integer pixels */
	state.dst.x1 = crtc_x;
	state.dst.x2 = crtc_x + crtc_w;
	state.dst.y1 = crtc_y;
	state.dst.y2 = crtc_y + crtc_h;

	state.clip.x1 = 0;
	state.clip.y1 = 0;
	state.clip.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0;
	state.clip.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0;

	state.orig_src = state.src;
	state.orig_dst = state.dst;

	ret = intel_check_cursor_plane(plane, &state);
	if (ret)
		return ret;

	return intel_commit_cursor_plane(plane, &state);
}

static const struct drm_plane_funcs intel_cursor_plane_funcs = {
	.update_plane = intel_cursor_plane_update,
	.disable_plane = intel_cursor_plane_disable,
	.destroy = intel_plane_destroy,
	.set_property = intel_plane_set_property,
};

static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
						   int pipe)
{
	struct intel_plane *cursor;

	cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
	if (cursor == NULL)
		return NULL;

	cursor->can_scale = false;
	cursor->max_downscale = 1;
	cursor->pipe = pipe;
	cursor->plane = pipe;
	cursor->rotation = BIT(DRM_ROTATE_0);

	drm_universal_plane_init(dev, &cursor->base, 0,
				 &intel_cursor_plane_funcs,
				 intel_cursor_formats,
				 ARRAY_SIZE(intel_cursor_formats),
				 DRM_PLANE_TYPE_CURSOR);

	if (INTEL_INFO(dev)->gen >= 4) {
		if (!dev->mode_config.rotation_property)
			dev->mode_config.rotation_property =
				drm_mode_create_rotation_property(dev,
							BIT(DRM_ROTATE_0) |
							BIT(DRM_ROTATE_180));
		if (dev->mode_config.rotation_property)
			drm_object_attach_property(&cursor->base.base,
				dev->mode_config.rotation_property,
				cursor->rotation);
	}

	return &cursor->base;
}

static void intel_crtc_init(struct drm_device *dev, int pipe)
12004
static void intel_crtc_init(struct drm_device *dev, int pipe)
11534
{
12005
{
11535
	struct drm_i915_private *dev_priv = dev->dev_private;
12006
	struct drm_i915_private *dev_priv = dev->dev_private;
11536
	struct intel_crtc *intel_crtc;
12007
	struct intel_crtc *intel_crtc;
11537
	struct drm_plane *primary = NULL;
12008
	struct drm_plane *primary = NULL;
11538
	struct drm_plane *cursor = NULL;
12009
	struct drm_plane *cursor = NULL;
11539
	int i, ret;
12010
	int i, ret;
11540
 
12011
 
11541
	intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
12012
	intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
11542
	if (intel_crtc == NULL)
12013
	if (intel_crtc == NULL)
11543
		return;
12014
		return;
11544
 
12015
 
11545
	primary = intel_primary_plane_create(dev, pipe);
12016
	primary = intel_primary_plane_create(dev, pipe);
11546
	if (!primary)
12017
	if (!primary)
11547
		goto fail;
12018
		goto fail;
11548
 
12019
 
11549
	cursor = intel_cursor_plane_create(dev, pipe);
12020
	cursor = intel_cursor_plane_create(dev, pipe);
11550
	if (!cursor)
12021
	if (!cursor)
11551
		goto fail;
12022
		goto fail;
11552
 
12023
 
11553
	ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary,
12024
	ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary,
11554
					cursor, &intel_crtc_funcs);
12025
					cursor, &intel_crtc_funcs);
11555
	if (ret)
12026
	if (ret)
11556
		goto fail;
12027
		goto fail;
11557
 
12028
 
11558
	drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
12029
	drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
11559
	for (i = 0; i < 256; i++) {
12030
	for (i = 0; i < 256; i++) {
11560
		intel_crtc->lut_r[i] = i;
12031
		intel_crtc->lut_r[i] = i;
11561
		intel_crtc->lut_g[i] = i;
12032
		intel_crtc->lut_g[i] = i;
11562
		intel_crtc->lut_b[i] = i;
12033
		intel_crtc->lut_b[i] = i;
11563
	}
12034
	}
11564
 
12035
 
11565
	/*
12036
	/*
11566
	 * On gen2/3 only plane A can do fbc, but the panel fitter and lvds port
12037
	 * On gen2/3 only plane A can do fbc, but the panel fitter and lvds port
11567
	 * is hooked to pipe B. Hence we want plane A feeding pipe B.
12038
	 * is hooked to pipe B. Hence we want plane A feeding pipe B.
11568
	 */
12039
	 */
11569
	intel_crtc->pipe = pipe;
12040
	intel_crtc->pipe = pipe;
11570
	intel_crtc->plane = pipe;
12041
	intel_crtc->plane = pipe;
11571
	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) {
12042
	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) {
11572
		DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
12043
		DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
11573
		intel_crtc->plane = !pipe;
12044
		intel_crtc->plane = !pipe;
11574
	}
12045
	}
11575
 
12046
 
11576
	intel_crtc->cursor_base = ~0;
12047
	intel_crtc->cursor_base = ~0;
11577
	intel_crtc->cursor_cntl = ~0;
12048
	intel_crtc->cursor_cntl = ~0;
11578
 
-
 
11579
	init_waitqueue_head(&intel_crtc->vbl_wait);
12049
	intel_crtc->cursor_size = ~0;
11580
 
12050
 
11581
	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
12051
	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
11582
	       dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
12052
	       dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
11583
	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
12053
	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
11584
	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
12054
	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
11585
 
12055
 
11586
	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
12056
	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
11587
 
12057
 
11588
	WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
12058
	WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
11589
	return;
12059
	return;
11590
 
12060
 
11591
fail:
12061
fail:
11592
	if (primary)
12062
	if (primary)
11593
		drm_plane_cleanup(primary);
12063
		drm_plane_cleanup(primary);
11594
	if (cursor)
12064
	if (cursor)
11595
		drm_plane_cleanup(cursor);
12065
		drm_plane_cleanup(cursor);
11596
	kfree(intel_crtc);
12066
	kfree(intel_crtc);
11597
}
12067
}
11598
 
12068
 
11599
enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
12069
enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
11600
{
12070
{
11601
	struct drm_encoder *encoder = connector->base.encoder;
12071
	struct drm_encoder *encoder = connector->base.encoder;
11602
	struct drm_device *dev = connector->base.dev;
12072
	struct drm_device *dev = connector->base.dev;
11603
 
12073
 
11604
	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
12074
	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
11605
 
12075
 
11606
	if (!encoder)
12076
	if (!encoder || WARN_ON(!encoder->crtc))
11607
		return INVALID_PIPE;
12077
		return INVALID_PIPE;
11608
 
12078
 
11609
	return to_intel_crtc(encoder->crtc)->pipe;
12079
	return to_intel_crtc(encoder->crtc)->pipe;
11610
}
12080
}
11611
 
12081
 
11612
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
12082
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
11613
				struct drm_file *file)
12083
				struct drm_file *file)
11614
{
12084
{
11615
	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
12085
	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
11616
	struct drm_crtc *drmmode_crtc;
12086
	struct drm_crtc *drmmode_crtc;
11617
	struct intel_crtc *crtc;
12087
	struct intel_crtc *crtc;
11618
 
12088
 
11619
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
12089
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
11620
		return -ENODEV;
12090
		return -ENODEV;
11621
 
12091
 
11622
	drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id);
12092
	drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id);
11623
 
12093
 
11624
	if (!drmmode_crtc) {
12094
	if (!drmmode_crtc) {
11625
		DRM_ERROR("no such CRTC id\n");
12095
		DRM_ERROR("no such CRTC id\n");
11626
		return -ENOENT;
12096
		return -ENOENT;
11627
	}
12097
	}
11628
 
12098
 
11629
	crtc = to_intel_crtc(drmmode_crtc);
12099
	crtc = to_intel_crtc(drmmode_crtc);
11630
	pipe_from_crtc_id->pipe = crtc->pipe;
12100
	pipe_from_crtc_id->pipe = crtc->pipe;
11631
 
12101
 
11632
	return 0;
12102
	return 0;
11633
}
12103
}
11634
 
12104
 
11635
static int intel_encoder_clones(struct intel_encoder *encoder)
12105
static int intel_encoder_clones(struct intel_encoder *encoder)
11636
{
12106
{
11637
	struct drm_device *dev = encoder->base.dev;
12107
	struct drm_device *dev = encoder->base.dev;
11638
	struct intel_encoder *source_encoder;
12108
	struct intel_encoder *source_encoder;
11639
	int index_mask = 0;
12109
	int index_mask = 0;
11640
	int entry = 0;
12110
	int entry = 0;
11641
 
12111
 
11642
	list_for_each_entry(source_encoder,
-
 
11643
			    &dev->mode_config.encoder_list, base.head) {
12112
	for_each_intel_encoder(dev, source_encoder) {
11644
		if (encoders_cloneable(encoder, source_encoder))
12113
		if (encoders_cloneable(encoder, source_encoder))
11645
			index_mask |= (1 << entry);
12114
			index_mask |= (1 << entry);
11646
 
12115
 
11647
		entry++;
12116
		entry++;
11648
	}
12117
	}
11649
 
12118
 
11650
	return index_mask;
12119
	return index_mask;
11651
}
12120
}
11652
 
12121
 
11653
static bool has_edp_a(struct drm_device *dev)
12122
static bool has_edp_a(struct drm_device *dev)
11654
{
12123
{
11655
	struct drm_i915_private *dev_priv = dev->dev_private;
12124
	struct drm_i915_private *dev_priv = dev->dev_private;
11656
 
12125
 
11657
	if (!IS_MOBILE(dev))
12126
	if (!IS_MOBILE(dev))
11658
		return false;
12127
		return false;
11659
 
12128
 
11660
	if ((I915_READ(DP_A) & DP_DETECTED) == 0)
12129
	if ((I915_READ(DP_A) & DP_DETECTED) == 0)
11661
		return false;
12130
		return false;
11662
 
12131
 
11663
	if (IS_GEN5(dev) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
12132
	if (IS_GEN5(dev) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
11664
		return false;
12133
		return false;
11665
 
12134
 
11666
	return true;
12135
	return true;
11667
}
12136
}
11668
 
12137
 
11669
const char *intel_output_name(int output)
12138
const char *intel_output_name(int output)
11670
{
12139
{
11671
	static const char *names[] = {
12140
	static const char *names[] = {
11672
		[INTEL_OUTPUT_UNUSED] = "Unused",
12141
		[INTEL_OUTPUT_UNUSED] = "Unused",
11673
		[INTEL_OUTPUT_ANALOG] = "Analog",
12142
		[INTEL_OUTPUT_ANALOG] = "Analog",
11674
		[INTEL_OUTPUT_DVO] = "DVO",
12143
		[INTEL_OUTPUT_DVO] = "DVO",
11675
		[INTEL_OUTPUT_SDVO] = "SDVO",
12144
		[INTEL_OUTPUT_SDVO] = "SDVO",
11676
		[INTEL_OUTPUT_LVDS] = "LVDS",
12145
		[INTEL_OUTPUT_LVDS] = "LVDS",
11677
		[INTEL_OUTPUT_TVOUT] = "TV",
12146
		[INTEL_OUTPUT_TVOUT] = "TV",
11678
		[INTEL_OUTPUT_HDMI] = "HDMI",
12147
		[INTEL_OUTPUT_HDMI] = "HDMI",
11679
		[INTEL_OUTPUT_DISPLAYPORT] = "DisplayPort",
12148
		[INTEL_OUTPUT_DISPLAYPORT] = "DisplayPort",
11680
		[INTEL_OUTPUT_EDP] = "eDP",
12149
		[INTEL_OUTPUT_EDP] = "eDP",
11681
		[INTEL_OUTPUT_DSI] = "DSI",
12150
		[INTEL_OUTPUT_DSI] = "DSI",
11682
		[INTEL_OUTPUT_UNKNOWN] = "Unknown",
12151
		[INTEL_OUTPUT_UNKNOWN] = "Unknown",
11683
	};
12152
	};
11684
 
12153
 
11685
	if (output < 0 || output >= ARRAY_SIZE(names) || !names[output])
12154
	if (output < 0 || output >= ARRAY_SIZE(names) || !names[output])
11686
		return "Invalid";
12155
		return "Invalid";
11687
 
12156
 
11688
	return names[output];
12157
	return names[output];
11689
}
12158
}
11690
 
12159
 
11691
static bool intel_crt_present(struct drm_device *dev)
12160
static bool intel_crt_present(struct drm_device *dev)
11692
{
12161
{
11693
	struct drm_i915_private *dev_priv = dev->dev_private;
12162
	struct drm_i915_private *dev_priv = dev->dev_private;
11694
 
12163
 
-
 
12164
	if (INTEL_INFO(dev)->gen >= 9)
-
 
12165
		return false;
-
 
12166
 
11695
	if (IS_ULT(dev))
12167
	if (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
11696
		return false;
12168
		return false;
11697
 
12169
 
11698
	if (IS_CHERRYVIEW(dev))
12170
	if (IS_CHERRYVIEW(dev))
11699
		return false;
12171
		return false;
11700
 
12172
 
11701
	if (IS_VALLEYVIEW(dev) && !dev_priv->vbt.int_crt_support)
12173
	if (IS_VALLEYVIEW(dev) && !dev_priv->vbt.int_crt_support)
11702
		return false;
12174
		return false;
11703
 
12175
 
11704
	return true;
12176
	return true;
11705
}
12177
}
11706
 
12178
 
11707
static void intel_setup_outputs(struct drm_device *dev)
12179
static void intel_setup_outputs(struct drm_device *dev)
11708
{
12180
{
11709
	struct drm_i915_private *dev_priv = dev->dev_private;
12181
	struct drm_i915_private *dev_priv = dev->dev_private;
11710
	struct intel_encoder *encoder;
12182
	struct intel_encoder *encoder;
11711
	bool dpd_is_edp = false;
12183
	bool dpd_is_edp = false;
11712
 
12184
 
11713
	intel_lvds_init(dev);
12185
	intel_lvds_init(dev);
11714
 
12186
 
11715
	if (intel_crt_present(dev))
12187
	if (intel_crt_present(dev))
11716
	intel_crt_init(dev);
12188
	intel_crt_init(dev);
11717
 
12189
 
11718
	if (HAS_DDI(dev)) {
12190
	if (HAS_DDI(dev)) {
11719
		int found;
12191
		int found;
11720
 
12192
 
11721
		/* Haswell uses DDI functions to detect digital outputs */
12193
		/* Haswell uses DDI functions to detect digital outputs */
11722
		found = I915_READ(DDI_BUF_CTL_A) & DDI_INIT_DISPLAY_DETECTED;
12194
		found = I915_READ(DDI_BUF_CTL_A) & DDI_INIT_DISPLAY_DETECTED;
11723
		/* DDI A only supports eDP */
12195
		/* DDI A only supports eDP */
11724
		if (found)
12196
		if (found)
11725
			intel_ddi_init(dev, PORT_A);
12197
			intel_ddi_init(dev, PORT_A);
11726
 
12198
 
11727
		/* DDI B, C and D detection is indicated by the SFUSE_STRAP
12199
		/* DDI B, C and D detection is indicated by the SFUSE_STRAP
11728
		 * register */
12200
		 * register */
11729
		found = I915_READ(SFUSE_STRAP);
12201
		found = I915_READ(SFUSE_STRAP);
11730
 
12202
 
11731
		if (found & SFUSE_STRAP_DDIB_DETECTED)
12203
		if (found & SFUSE_STRAP_DDIB_DETECTED)
11732
			intel_ddi_init(dev, PORT_B);
12204
			intel_ddi_init(dev, PORT_B);
11733
		if (found & SFUSE_STRAP_DDIC_DETECTED)
12205
		if (found & SFUSE_STRAP_DDIC_DETECTED)
11734
			intel_ddi_init(dev, PORT_C);
12206
			intel_ddi_init(dev, PORT_C);
11735
		if (found & SFUSE_STRAP_DDID_DETECTED)
12207
		if (found & SFUSE_STRAP_DDID_DETECTED)
11736
			intel_ddi_init(dev, PORT_D);
12208
			intel_ddi_init(dev, PORT_D);
11737
	} else if (HAS_PCH_SPLIT(dev)) {
12209
	} else if (HAS_PCH_SPLIT(dev)) {
11738
		int found;
12210
		int found;
11739
		dpd_is_edp = intel_dp_is_edp(dev, PORT_D);
12211
		dpd_is_edp = intel_dp_is_edp(dev, PORT_D);
11740
 
12212
 
11741
		if (has_edp_a(dev))
12213
		if (has_edp_a(dev))
11742
			intel_dp_init(dev, DP_A, PORT_A);
12214
			intel_dp_init(dev, DP_A, PORT_A);
11743
 
12215
 
11744
		if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
12216
		if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
11745
			/* PCH SDVOB multiplex with HDMIB */
12217
			/* PCH SDVOB multiplex with HDMIB */
11746
			found = intel_sdvo_init(dev, PCH_SDVOB, true);
12218
			found = intel_sdvo_init(dev, PCH_SDVOB, true);
11747
			if (!found)
12219
			if (!found)
11748
				intel_hdmi_init(dev, PCH_HDMIB, PORT_B);
12220
				intel_hdmi_init(dev, PCH_HDMIB, PORT_B);
11749
			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
12221
			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
11750
				intel_dp_init(dev, PCH_DP_B, PORT_B);
12222
				intel_dp_init(dev, PCH_DP_B, PORT_B);
11751
		}
12223
		}
11752
 
12224
 
11753
		if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
12225
		if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
11754
			intel_hdmi_init(dev, PCH_HDMIC, PORT_C);
12226
			intel_hdmi_init(dev, PCH_HDMIC, PORT_C);
11755
 
12227
 
11756
		if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
12228
		if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
11757
			intel_hdmi_init(dev, PCH_HDMID, PORT_D);
12229
			intel_hdmi_init(dev, PCH_HDMID, PORT_D);
11758
 
12230
 
11759
		if (I915_READ(PCH_DP_C) & DP_DETECTED)
12231
		if (I915_READ(PCH_DP_C) & DP_DETECTED)
11760
			intel_dp_init(dev, PCH_DP_C, PORT_C);
12232
			intel_dp_init(dev, PCH_DP_C, PORT_C);
11761
 
12233
 
11762
		if (I915_READ(PCH_DP_D) & DP_DETECTED)
12234
		if (I915_READ(PCH_DP_D) & DP_DETECTED)
11763
			intel_dp_init(dev, PCH_DP_D, PORT_D);
12235
			intel_dp_init(dev, PCH_DP_D, PORT_D);
11764
	} else if (IS_VALLEYVIEW(dev)) {
12236
	} else if (IS_VALLEYVIEW(dev)) {
-
 
12237
		/*
-
 
12238
		 * The DP_DETECTED bit is the latched state of the DDC
-
 
12239
		 * SDA pin at boot. However since eDP doesn't require DDC
-
 
12240
		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
-
 
12241
		 * eDP ports may have been muxed to an alternate function.
-
 
12242
		 * Thus we can't rely on the DP_DETECTED bit alone to detect
-
 
12243
		 * eDP ports. Consult the VBT as well as DP_DETECTED to
-
 
12244
		 * detect eDP ports.
-
 
12245
		 */
11765
		if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED) {
12246
		if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED)
11766
			intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB,
12247
			intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB,
11767
					PORT_B);
12248
					PORT_B);
11768
			if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED)
12249
		if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED ||
-
 
12250
		    intel_dp_is_edp(dev, PORT_B))
11769
				intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B);
12251
				intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B);
11770
		}
-
 
11771
 
12252
 
11772
		if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIC) & SDVO_DETECTED) {
12253
		if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIC) & SDVO_DETECTED)
11773
			intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC,
12254
			intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC,
11774
					PORT_C);
12255
					PORT_C);
11775
		if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED)
12256
		if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED ||
-
 
12257
		    intel_dp_is_edp(dev, PORT_C))
11776
				intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C);
12258
				intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C);
11777
		}
-
 
11778
 
12259
 
11779
		if (IS_CHERRYVIEW(dev)) {
12260
		if (IS_CHERRYVIEW(dev)) {
11780
			if (I915_READ(VLV_DISPLAY_BASE + CHV_HDMID) & SDVO_DETECTED) {
12261
			if (I915_READ(VLV_DISPLAY_BASE + CHV_HDMID) & SDVO_DETECTED)
11781
				intel_hdmi_init(dev, VLV_DISPLAY_BASE + CHV_HDMID,
12262
				intel_hdmi_init(dev, VLV_DISPLAY_BASE + CHV_HDMID,
11782
						PORT_D);
12263
						PORT_D);
-
 
12264
			/* eDP not supported on port D, so don't check VBT */
11783
				if (I915_READ(VLV_DISPLAY_BASE + DP_D) & DP_DETECTED)
12265
				if (I915_READ(VLV_DISPLAY_BASE + DP_D) & DP_DETECTED)
11784
					intel_dp_init(dev, VLV_DISPLAY_BASE + DP_D, PORT_D);
12266
					intel_dp_init(dev, VLV_DISPLAY_BASE + DP_D, PORT_D);
11785
			}
12267
			}
11786
		}
12268
		}
11787
 
-
 
11788
		intel_dsi_init(dev);
12269
		intel_dsi_init(dev);
11789
	} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
12270
	} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
11790
		bool found = false;
12271
		bool found = false;
11791
 
12272
 
11792
		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
12273
		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
11793
			DRM_DEBUG_KMS("probing SDVOB\n");
12274
			DRM_DEBUG_KMS("probing SDVOB\n");
11794
			found = intel_sdvo_init(dev, GEN3_SDVOB, true);
12275
			found = intel_sdvo_init(dev, GEN3_SDVOB, true);
11795
			if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
12276
			if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
11796
				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
12277
				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
11797
				intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
12278
				intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
11798
			}
12279
			}
11799
 
12280
 
11800
			if (!found && SUPPORTS_INTEGRATED_DP(dev))
12281
			if (!found && SUPPORTS_INTEGRATED_DP(dev))
11801
				intel_dp_init(dev, DP_B, PORT_B);
12282
				intel_dp_init(dev, DP_B, PORT_B);
11802
			}
12283
			}
11803
 
12284
 
11804
		/* Before G4X SDVOC doesn't have its own detect register */
12285
		/* Before G4X SDVOC doesn't have its own detect register */
11805
 
12286
 
11806
		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
12287
		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
11807
			DRM_DEBUG_KMS("probing SDVOC\n");
12288
			DRM_DEBUG_KMS("probing SDVOC\n");
11808
			found = intel_sdvo_init(dev, GEN3_SDVOC, false);
12289
			found = intel_sdvo_init(dev, GEN3_SDVOC, false);
11809
		}
12290
		}
11810
 
12291
 
11811
		if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
12292
		if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
11812
 
12293
 
11813
			if (SUPPORTS_INTEGRATED_HDMI(dev)) {
12294
			if (SUPPORTS_INTEGRATED_HDMI(dev)) {
11814
				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
12295
				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
11815
				intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
12296
				intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
11816
			}
12297
			}
11817
			if (SUPPORTS_INTEGRATED_DP(dev))
12298
			if (SUPPORTS_INTEGRATED_DP(dev))
11818
				intel_dp_init(dev, DP_C, PORT_C);
12299
				intel_dp_init(dev, DP_C, PORT_C);
11819
			}
12300
			}
11820
 
12301
 
11821
		if (SUPPORTS_INTEGRATED_DP(dev) &&
12302
		if (SUPPORTS_INTEGRATED_DP(dev) &&
11822
		    (I915_READ(DP_D) & DP_DETECTED))
12303
		    (I915_READ(DP_D) & DP_DETECTED))
11823
			intel_dp_init(dev, DP_D, PORT_D);
12304
			intel_dp_init(dev, DP_D, PORT_D);
11824
	} else if (IS_GEN2(dev))
12305
	} else if (IS_GEN2(dev))
11825
		intel_dvo_init(dev);
12306
		intel_dvo_init(dev);
11826
 
12307
 
11827
 
12308
 
11828
	intel_edp_psr_init(dev);
12309
	intel_psr_init(dev);
11829
 
12310
 
11830
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
12311
	for_each_intel_encoder(dev, encoder) {
11831
		encoder->base.possible_crtcs = encoder->crtc_mask;
12312
		encoder->base.possible_crtcs = encoder->crtc_mask;
11832
		encoder->base.possible_clones =
12313
		encoder->base.possible_clones =
11833
			intel_encoder_clones(encoder);
12314
			intel_encoder_clones(encoder);
11834
	}
12315
	}
11835
 
12316
 
11836
	intel_init_pch_refclk(dev);
12317
	intel_init_pch_refclk(dev);
11837
 
12318
 
11838
	drm_helper_move_panel_connectors_to_head(dev);
12319
	drm_helper_move_panel_connectors_to_head(dev);
11839
}
12320
}
11840
 
12321
 
11841
 
12322
 
11842
 
12323
 
11843
static const struct drm_framebuffer_funcs intel_fb_funcs = {
12324
static const struct drm_framebuffer_funcs intel_fb_funcs = {
11844
//	.destroy = intel_user_framebuffer_destroy,
12325
//	.destroy = intel_user_framebuffer_destroy,
11845
//	.create_handle = intel_user_framebuffer_create_handle,
12326
//	.create_handle = intel_user_framebuffer_create_handle,
11846
};
12327
};
11847
 
12328
 
11848
static int intel_framebuffer_init(struct drm_device *dev,
12329
static int intel_framebuffer_init(struct drm_device *dev,
11849
			   struct intel_framebuffer *intel_fb,
12330
			   struct intel_framebuffer *intel_fb,
11850
			   struct drm_mode_fb_cmd2 *mode_cmd,
12331
			   struct drm_mode_fb_cmd2 *mode_cmd,
11851
			   struct drm_i915_gem_object *obj)
12332
			   struct drm_i915_gem_object *obj)
11852
{
12333
{
11853
	int aligned_height;
12334
	int aligned_height;
11854
	int pitch_limit;
12335
	int pitch_limit;
11855
	int ret;
12336
	int ret;
11856
 
12337
 
11857
	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
12338
	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
11858
 
12339
 
11859
	if (obj->tiling_mode == I915_TILING_Y) {
12340
	if (obj->tiling_mode == I915_TILING_Y) {
11860
		DRM_DEBUG("hardware does not support tiling Y\n");
12341
		DRM_DEBUG("hardware does not support tiling Y\n");
11861
		return -EINVAL;
12342
		return -EINVAL;
11862
	}
12343
	}
11863
 
12344
 
11864
	if (mode_cmd->pitches[0] & 63) {
12345
	if (mode_cmd->pitches[0] & 63) {
11865
		DRM_DEBUG("pitch (%d) must be at least 64 byte aligned\n",
12346
		DRM_DEBUG("pitch (%d) must be at least 64 byte aligned\n",
11866
			  mode_cmd->pitches[0]);
12347
			  mode_cmd->pitches[0]);
11867
		return -EINVAL;
12348
		return -EINVAL;
11868
	}
12349
	}
11869
 
12350
 
11870
	if (INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev)) {
12351
	if (INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev)) {
11871
		pitch_limit = 32*1024;
12352
		pitch_limit = 32*1024;
11872
	} else if (INTEL_INFO(dev)->gen >= 4) {
12353
	} else if (INTEL_INFO(dev)->gen >= 4) {
11873
		if (obj->tiling_mode)
12354
		if (obj->tiling_mode)
11874
			pitch_limit = 16*1024;
12355
			pitch_limit = 16*1024;
11875
		else
12356
		else
11876
			pitch_limit = 32*1024;
12357
			pitch_limit = 32*1024;
11877
	} else if (INTEL_INFO(dev)->gen >= 3) {
12358
	} else if (INTEL_INFO(dev)->gen >= 3) {
11878
		if (obj->tiling_mode)
12359
		if (obj->tiling_mode)
11879
			pitch_limit = 8*1024;
12360
			pitch_limit = 8*1024;
11880
		else
12361
		else
11881
			pitch_limit = 16*1024;
12362
			pitch_limit = 16*1024;
11882
	} else
12363
	} else
11883
		/* XXX DSPC is limited to 4k tiled */
12364
		/* XXX DSPC is limited to 4k tiled */
11884
		pitch_limit = 8*1024;
12365
		pitch_limit = 8*1024;
11885
 
12366
 
11886
	if (mode_cmd->pitches[0] > pitch_limit) {
12367
	if (mode_cmd->pitches[0] > pitch_limit) {
11887
		DRM_DEBUG("%s pitch (%d) must be at less than %d\n",
12368
		DRM_DEBUG("%s pitch (%d) must be at less than %d\n",
11888
			  obj->tiling_mode ? "tiled" : "linear",
12369
			  obj->tiling_mode ? "tiled" : "linear",
11889
			  mode_cmd->pitches[0], pitch_limit);
12370
			  mode_cmd->pitches[0], pitch_limit);
11890
		return -EINVAL;
12371
		return -EINVAL;
11891
	}
12372
	}
11892
 
12373
 
11893
	if (obj->tiling_mode != I915_TILING_NONE &&
12374
	if (obj->tiling_mode != I915_TILING_NONE &&
11894
	    mode_cmd->pitches[0] != obj->stride) {
12375
	    mode_cmd->pitches[0] != obj->stride) {
11895
		DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
12376
		DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
11896
			  mode_cmd->pitches[0], obj->stride);
12377
			  mode_cmd->pitches[0], obj->stride);
11897
			return -EINVAL;
12378
			return -EINVAL;
11898
	}
12379
	}
11899
 
12380
 
11900
	/* Reject formats not supported by any plane early. */
12381
	/* Reject formats not supported by any plane early. */
11901
	switch (mode_cmd->pixel_format) {
12382
	switch (mode_cmd->pixel_format) {
11902
	case DRM_FORMAT_C8:
12383
	case DRM_FORMAT_C8:
11903
	case DRM_FORMAT_RGB565:
12384
	case DRM_FORMAT_RGB565:
11904
	case DRM_FORMAT_XRGB8888:
12385
	case DRM_FORMAT_XRGB8888:
11905
	case DRM_FORMAT_ARGB8888:
12386
	case DRM_FORMAT_ARGB8888:
11906
		break;
12387
		break;
11907
	case DRM_FORMAT_XRGB1555:
12388
	case DRM_FORMAT_XRGB1555:
11908
	case DRM_FORMAT_ARGB1555:
12389
	case DRM_FORMAT_ARGB1555:
11909
		if (INTEL_INFO(dev)->gen > 3) {
12390
		if (INTEL_INFO(dev)->gen > 3) {
11910
			DRM_DEBUG("unsupported pixel format: %s\n",
12391
			DRM_DEBUG("unsupported pixel format: %s\n",
11911
				  drm_get_format_name(mode_cmd->pixel_format));
12392
				  drm_get_format_name(mode_cmd->pixel_format));
11912
			return -EINVAL;
12393
			return -EINVAL;
11913
		}
12394
		}
11914
		break;
12395
		break;
11915
	case DRM_FORMAT_XBGR8888:
12396
	case DRM_FORMAT_XBGR8888:
11916
	case DRM_FORMAT_ABGR8888:
12397
	case DRM_FORMAT_ABGR8888:
11917
	case DRM_FORMAT_XRGB2101010:
12398
	case DRM_FORMAT_XRGB2101010:
11918
	case DRM_FORMAT_ARGB2101010:
12399
	case DRM_FORMAT_ARGB2101010:
11919
	case DRM_FORMAT_XBGR2101010:
12400
	case DRM_FORMAT_XBGR2101010:
11920
	case DRM_FORMAT_ABGR2101010:
12401
	case DRM_FORMAT_ABGR2101010:
11921
		if (INTEL_INFO(dev)->gen < 4) {
12402
		if (INTEL_INFO(dev)->gen < 4) {
11922
			DRM_DEBUG("unsupported pixel format: %s\n",
12403
			DRM_DEBUG("unsupported pixel format: %s\n",
11923
				  drm_get_format_name(mode_cmd->pixel_format));
12404
				  drm_get_format_name(mode_cmd->pixel_format));
11924
			return -EINVAL;
12405
			return -EINVAL;
11925
		}
12406
		}
11926
		break;
12407
		break;
11927
	case DRM_FORMAT_YUYV:
12408
	case DRM_FORMAT_YUYV:
11928
	case DRM_FORMAT_UYVY:
12409
	case DRM_FORMAT_UYVY:
11929
	case DRM_FORMAT_YVYU:
12410
	case DRM_FORMAT_YVYU:
11930
	case DRM_FORMAT_VYUY:
12411
	case DRM_FORMAT_VYUY:
11931
		if (INTEL_INFO(dev)->gen < 5) {
12412
		if (INTEL_INFO(dev)->gen < 5) {
11932
			DRM_DEBUG("unsupported pixel format: %s\n",
12413
			DRM_DEBUG("unsupported pixel format: %s\n",
11933
				  drm_get_format_name(mode_cmd->pixel_format));
12414
				  drm_get_format_name(mode_cmd->pixel_format));
11934
			return -EINVAL;
12415
			return -EINVAL;
11935
		}
12416
		}
11936
		break;
12417
		break;
11937
	default:
12418
	default:
11938
		DRM_DEBUG("unsupported pixel format: %s\n",
12419
		DRM_DEBUG("unsupported pixel format: %s\n",
11939
			  drm_get_format_name(mode_cmd->pixel_format));
12420
			  drm_get_format_name(mode_cmd->pixel_format));
11940
		return -EINVAL;
12421
		return -EINVAL;
11941
	}
12422
	}
11942
 
12423
 
11943
	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
12424
	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
11944
	if (mode_cmd->offsets[0] != 0)
12425
	if (mode_cmd->offsets[0] != 0)
11945
		return -EINVAL;
12426
		return -EINVAL;
11946
 
12427
 
11947
	aligned_height = intel_align_height(dev, mode_cmd->height,
12428
	aligned_height = intel_align_height(dev, mode_cmd->height,
11948
					    obj->tiling_mode);
12429
					    obj->tiling_mode);
11949
	/* FIXME drm helper for size checks (especially planar formats)? */
12430
	/* FIXME drm helper for size checks (especially planar formats)? */
11950
	if (obj->base.size < aligned_height * mode_cmd->pitches[0])
12431
	if (obj->base.size < aligned_height * mode_cmd->pitches[0])
11951
		return -EINVAL;
12432
		return -EINVAL;
11952
 
12433
 
11953
	drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
12434
	drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
11954
	intel_fb->obj = obj;
12435
	intel_fb->obj = obj;
11955
	intel_fb->obj->framebuffer_references++;
12436
	intel_fb->obj->framebuffer_references++;
11956
 
12437
 
11957
	ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
12438
	ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
11958
	if (ret) {
12439
	if (ret) {
11959
		DRM_ERROR("framebuffer init failed %d\n", ret);
12440
		DRM_ERROR("framebuffer init failed %d\n", ret);
11960
		return ret;
12441
		return ret;
11961
	}
12442
	}
11962
 
12443
 
11963
	return 0;
12444
	return 0;
11964
}
12445
}
11965
 
12446
 
11966
#ifndef CONFIG_DRM_I915_FBDEV
12447
#ifndef CONFIG_DRM_I915_FBDEV
11967
static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
12448
static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
11968
{
12449
{
11969
}
12450
}
11970
#endif
12451
#endif
11971
 
12452
 
11972
static const struct drm_mode_config_funcs intel_mode_funcs = {
12453
static const struct drm_mode_config_funcs intel_mode_funcs = {
11973
	.fb_create = NULL,
12454
	.fb_create = NULL,
11974
	.output_poll_changed = intel_fbdev_output_poll_changed,
12455
	.output_poll_changed = intel_fbdev_output_poll_changed,
11975
};
12456
};
11976
 
12457
 
11977
/* Set up chip specific display functions */
12458
/* Set up chip specific display functions */
11978
static void intel_init_display(struct drm_device *dev)
12459
static void intel_init_display(struct drm_device *dev)
11979
{
12460
{
11980
	struct drm_i915_private *dev_priv = dev->dev_private;
12461
	struct drm_i915_private *dev_priv = dev->dev_private;
11981
 
12462
 
11982
	if (HAS_PCH_SPLIT(dev) || IS_G4X(dev))
12463
	if (HAS_PCH_SPLIT(dev) || IS_G4X(dev))
11983
		dev_priv->display.find_dpll = g4x_find_best_dpll;
12464
		dev_priv->display.find_dpll = g4x_find_best_dpll;
11984
	else if (IS_CHERRYVIEW(dev))
12465
	else if (IS_CHERRYVIEW(dev))
11985
		dev_priv->display.find_dpll = chv_find_best_dpll;
12466
		dev_priv->display.find_dpll = chv_find_best_dpll;
11986
	else if (IS_VALLEYVIEW(dev))
12467
	else if (IS_VALLEYVIEW(dev))
11987
		dev_priv->display.find_dpll = vlv_find_best_dpll;
12468
		dev_priv->display.find_dpll = vlv_find_best_dpll;
11988
	else if (IS_PINEVIEW(dev))
12469
	else if (IS_PINEVIEW(dev))
11989
		dev_priv->display.find_dpll = pnv_find_best_dpll;
12470
		dev_priv->display.find_dpll = pnv_find_best_dpll;
11990
	else
12471
	else
11991
		dev_priv->display.find_dpll = i9xx_find_best_dpll;
12472
		dev_priv->display.find_dpll = i9xx_find_best_dpll;
11992
 
12473
 
11993
	if (HAS_DDI(dev)) {
12474
	if (HAS_DDI(dev)) {
11994
		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
12475
		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
11995
		dev_priv->display.get_plane_config = ironlake_get_plane_config;
12476
		dev_priv->display.get_plane_config = ironlake_get_plane_config;
11996
		dev_priv->display.crtc_mode_set = haswell_crtc_mode_set;
12477
		dev_priv->display.crtc_compute_clock =
-
 
12478
			haswell_crtc_compute_clock;
11997
		dev_priv->display.crtc_enable = haswell_crtc_enable;
12479
		dev_priv->display.crtc_enable = haswell_crtc_enable;
11998
		dev_priv->display.crtc_disable = haswell_crtc_disable;
12480
		dev_priv->display.crtc_disable = haswell_crtc_disable;
11999
		dev_priv->display.off = ironlake_crtc_off;
12481
		dev_priv->display.off = ironlake_crtc_off;
-
 
12482
		if (INTEL_INFO(dev)->gen >= 9)
-
 
12483
			dev_priv->display.update_primary_plane =
-
 
12484
				skylake_update_primary_plane;
-
 
12485
		else
12000
		dev_priv->display.update_primary_plane =
12486
		dev_priv->display.update_primary_plane =
12001
			ironlake_update_primary_plane;
12487
			ironlake_update_primary_plane;
12002
	} else if (HAS_PCH_SPLIT(dev)) {
12488
	} else if (HAS_PCH_SPLIT(dev)) {
12003
		dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
12489
		dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
12004
		dev_priv->display.get_plane_config = ironlake_get_plane_config;
12490
		dev_priv->display.get_plane_config = ironlake_get_plane_config;
12005
		dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
12491
		dev_priv->display.crtc_compute_clock =
-
 
12492
			ironlake_crtc_compute_clock;
12006
		dev_priv->display.crtc_enable = ironlake_crtc_enable;
12493
		dev_priv->display.crtc_enable = ironlake_crtc_enable;
12007
		dev_priv->display.crtc_disable = ironlake_crtc_disable;
12494
		dev_priv->display.crtc_disable = ironlake_crtc_disable;
12008
		dev_priv->display.off = ironlake_crtc_off;
12495
		dev_priv->display.off = ironlake_crtc_off;
12009
		dev_priv->display.update_primary_plane =
12496
		dev_priv->display.update_primary_plane =
12010
			ironlake_update_primary_plane;
12497
			ironlake_update_primary_plane;
12011
	} else if (IS_VALLEYVIEW(dev)) {
12498
	} else if (IS_VALLEYVIEW(dev)) {
12012
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
12499
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
12013
		dev_priv->display.get_plane_config = i9xx_get_plane_config;
12500
		dev_priv->display.get_plane_config = i9xx_get_plane_config;
12014
		dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
12501
		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
12015
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
12502
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
12016
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
12503
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
12017
		dev_priv->display.off = i9xx_crtc_off;
12504
		dev_priv->display.off = i9xx_crtc_off;
12018
		dev_priv->display.update_primary_plane =
12505
		dev_priv->display.update_primary_plane =
12019
			i9xx_update_primary_plane;
12506
			i9xx_update_primary_plane;
12020
	} else {
12507
	} else {
12021
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
12508
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
12022
		dev_priv->display.get_plane_config = i9xx_get_plane_config;
12509
		dev_priv->display.get_plane_config = i9xx_get_plane_config;
12023
		dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
12510
		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
12024
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
12511
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
12025
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
12512
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
12026
		dev_priv->display.off = i9xx_crtc_off;
12513
		dev_priv->display.off = i9xx_crtc_off;
12027
		dev_priv->display.update_primary_plane =
12514
		dev_priv->display.update_primary_plane =
12028
			i9xx_update_primary_plane;
12515
			i9xx_update_primary_plane;
12029
	}
12516
	}
12030
 
12517
 
12031
	/* Returns the core display clock speed */
12518
	/* Returns the core display clock speed */
12032
	if (IS_VALLEYVIEW(dev))
12519
	if (IS_VALLEYVIEW(dev))
12033
		dev_priv->display.get_display_clock_speed =
12520
		dev_priv->display.get_display_clock_speed =
12034
			valleyview_get_display_clock_speed;
12521
			valleyview_get_display_clock_speed;
12035
	else if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
12522
	else if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
12036
		dev_priv->display.get_display_clock_speed =
12523
		dev_priv->display.get_display_clock_speed =
12037
			i945_get_display_clock_speed;
12524
			i945_get_display_clock_speed;
12038
	else if (IS_I915G(dev))
12525
	else if (IS_I915G(dev))
12039
		dev_priv->display.get_display_clock_speed =
12526
		dev_priv->display.get_display_clock_speed =
12040
			i915_get_display_clock_speed;
12527
			i915_get_display_clock_speed;
12041
	else if (IS_I945GM(dev) || IS_845G(dev))
12528
	else if (IS_I945GM(dev) || IS_845G(dev))
12042
		dev_priv->display.get_display_clock_speed =
12529
		dev_priv->display.get_display_clock_speed =
12043
			i9xx_misc_get_display_clock_speed;
12530
			i9xx_misc_get_display_clock_speed;
12044
	else if (IS_PINEVIEW(dev))
12531
	else if (IS_PINEVIEW(dev))
12045
		dev_priv->display.get_display_clock_speed =
12532
		dev_priv->display.get_display_clock_speed =
12046
			pnv_get_display_clock_speed;
12533
			pnv_get_display_clock_speed;
12047
	else if (IS_I915GM(dev))
12534
	else if (IS_I915GM(dev))
12048
		dev_priv->display.get_display_clock_speed =
12535
		dev_priv->display.get_display_clock_speed =
12049
			i915gm_get_display_clock_speed;
12536
			i915gm_get_display_clock_speed;
12050
	else if (IS_I865G(dev))
12537
	else if (IS_I865G(dev))
12051
		dev_priv->display.get_display_clock_speed =
12538
		dev_priv->display.get_display_clock_speed =
12052
			i865_get_display_clock_speed;
12539
			i865_get_display_clock_speed;
12053
	else if (IS_I85X(dev))
12540
	else if (IS_I85X(dev))
12054
		dev_priv->display.get_display_clock_speed =
12541
		dev_priv->display.get_display_clock_speed =
12055
			i855_get_display_clock_speed;
12542
			i855_get_display_clock_speed;
12056
	else /* 852, 830 */
12543
	else /* 852, 830 */
12057
		dev_priv->display.get_display_clock_speed =
12544
		dev_priv->display.get_display_clock_speed =
12058
			i830_get_display_clock_speed;
12545
			i830_get_display_clock_speed;
12059
 
12546
 
12060
	if (HAS_PCH_SPLIT(dev)) {
-
 
12061
		if (IS_GEN5(dev)) {
12547
		if (IS_GEN5(dev)) {
12062
			dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
12548
			dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
12063
			dev_priv->display.write_eld = ironlake_write_eld;
-
 
12064
		} else if (IS_GEN6(dev)) {
12549
		} else if (IS_GEN6(dev)) {
12065
			dev_priv->display.fdi_link_train = gen6_fdi_link_train;
12550
			dev_priv->display.fdi_link_train = gen6_fdi_link_train;
12066
			dev_priv->display.write_eld = ironlake_write_eld;
-
 
12067
			dev_priv->display.modeset_global_resources =
-
 
12068
				snb_modeset_global_resources;
-
 
12069
		} else if (IS_IVYBRIDGE(dev)) {
12551
		} else if (IS_IVYBRIDGE(dev)) {
12070
			/* FIXME: detect B0+ stepping and use auto training */
12552
			/* FIXME: detect B0+ stepping and use auto training */
12071
			dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
12553
			dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
12072
			dev_priv->display.write_eld = ironlake_write_eld;
-
 
12073
			dev_priv->display.modeset_global_resources =
12554
			dev_priv->display.modeset_global_resources =
12074
				ivb_modeset_global_resources;
12555
				ivb_modeset_global_resources;
12075
		} else if (IS_HASWELL(dev) || IS_GEN8(dev)) {
12556
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
12076
			dev_priv->display.fdi_link_train = hsw_fdi_link_train;
12557
			dev_priv->display.fdi_link_train = hsw_fdi_link_train;
12077
			dev_priv->display.write_eld = haswell_write_eld;
-
 
12078
			dev_priv->display.modeset_global_resources =
-
 
12079
				haswell_modeset_global_resources;
-
 
12080
		}
-
 
12081
	} else if (IS_G4X(dev)) {
-
 
12082
		dev_priv->display.write_eld = g4x_write_eld;
-
 
12083
	} else if (IS_VALLEYVIEW(dev)) {
12558
	} else if (IS_VALLEYVIEW(dev)) {
12084
		dev_priv->display.modeset_global_resources =
12559
		dev_priv->display.modeset_global_resources =
12085
			valleyview_modeset_global_resources;
12560
			valleyview_modeset_global_resources;
12086
		dev_priv->display.write_eld = ironlake_write_eld;
-
 
12087
	}
12561
	}
12088
 
12562
 
12089
	/* Default just returns -ENODEV to indicate unsupported */
12563
	/* Default just returns -ENODEV to indicate unsupported */
12090
//	dev_priv->display.queue_flip = intel_default_queue_flip;
12564
//	dev_priv->display.queue_flip = intel_default_queue_flip;
12091
 
12565
 
12092
 
12566
 
12093
 
12567
 
12094
 
12568
 
12095
	intel_panel_init_backlight_funcs(dev);
12569
	intel_panel_init_backlight_funcs(dev);
-
 
12570
 
-
 
12571
	mutex_init(&dev_priv->pps_mutex);
12096
}
12572
}
12097
 
12573
 
12098
/*
12574
/*
12099
 * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
12575
 * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
12100
 * resume, or other times.  This quirk makes sure that's the case for
12576
 * resume, or other times.  This quirk makes sure that's the case for
12101
 * affected systems.
12577
 * affected systems.
12102
 */
12578
 */
12103
static void quirk_pipea_force(struct drm_device *dev)
12579
static void quirk_pipea_force(struct drm_device *dev)
12104
{
12580
{
12105
	struct drm_i915_private *dev_priv = dev->dev_private;
12581
	struct drm_i915_private *dev_priv = dev->dev_private;
12106
 
12582
 
12107
	dev_priv->quirks |= QUIRK_PIPEA_FORCE;
12583
	dev_priv->quirks |= QUIRK_PIPEA_FORCE;
12108
	DRM_INFO("applying pipe a force quirk\n");
12584
	DRM_INFO("applying pipe a force quirk\n");
12109
}
12585
}
-
 
12586
 
-
 
12587
static void quirk_pipeb_force(struct drm_device *dev)
-
 
12588
{
-
 
12589
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
12590
 
-
 
12591
	dev_priv->quirks |= QUIRK_PIPEB_FORCE;
-
 
12592
	DRM_INFO("applying pipe b force quirk\n");
-
 
12593
}
12110
 
12594
 
12111
/*
12595
/*
12112
 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
12596
 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
12113
 */
12597
 */
12114
static void quirk_ssc_force_disable(struct drm_device *dev)
12598
static void quirk_ssc_force_disable(struct drm_device *dev)
12115
{
12599
{
12116
	struct drm_i915_private *dev_priv = dev->dev_private;
12600
	struct drm_i915_private *dev_priv = dev->dev_private;
12117
	dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
12601
	dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
12118
	DRM_INFO("applying lvds SSC disable quirk\n");
12602
	DRM_INFO("applying lvds SSC disable quirk\n");
12119
}
12603
}
12120
 
12604
 
12121
/*
12605
/*
12122
 * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
12606
 * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
12123
 * brightness value
12607
 * brightness value
12124
 */
12608
 */
12125
static void quirk_invert_brightness(struct drm_device *dev)
12609
static void quirk_invert_brightness(struct drm_device *dev)
12126
{
12610
{
12127
	struct drm_i915_private *dev_priv = dev->dev_private;
12611
	struct drm_i915_private *dev_priv = dev->dev_private;
12128
	dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
12612
	dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
12129
	DRM_INFO("applying inverted panel brightness quirk\n");
12613
	DRM_INFO("applying inverted panel brightness quirk\n");
12130
}
12614
}
12131
 
12615
 
12132
/* Some VBT's incorrectly indicate no backlight is present */
12616
/* Some VBT's incorrectly indicate no backlight is present */
12133
static void quirk_backlight_present(struct drm_device *dev)
12617
static void quirk_backlight_present(struct drm_device *dev)
12134
{
12618
{
12135
	struct drm_i915_private *dev_priv = dev->dev_private;
12619
	struct drm_i915_private *dev_priv = dev->dev_private;
12136
	dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
12620
	dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
12137
	DRM_INFO("applying backlight present quirk\n");
12621
	DRM_INFO("applying backlight present quirk\n");
12138
}
12622
}
12139
 
12623
 
12140
struct intel_quirk {
12624
struct intel_quirk {
12141
	int device;
12625
	int device;
12142
	int subsystem_vendor;
12626
	int subsystem_vendor;
12143
	int subsystem_device;
12627
	int subsystem_device;
12144
	void (*hook)(struct drm_device *dev);
12628
	void (*hook)(struct drm_device *dev);
12145
};
12629
};
12146
 
12630
 
12147
/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
12631
/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
12148
struct intel_dmi_quirk {
12632
struct intel_dmi_quirk {
12149
	void (*hook)(struct drm_device *dev);
12633
	void (*hook)(struct drm_device *dev);
12150
	const struct dmi_system_id (*dmi_id_list)[];
12634
	const struct dmi_system_id (*dmi_id_list)[];
12151
};
12635
};
12152
 
12636
 
12153
static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
12637
static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
12154
{
12638
{
12155
	DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
12639
	DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
12156
	return 1;
12640
	return 1;
12157
}
12641
}
12158
 
12642
 
12159
static const struct intel_dmi_quirk intel_dmi_quirks[] = {
12643
static const struct intel_dmi_quirk intel_dmi_quirks[] = {
12160
	{
12644
	{
12161
		.dmi_id_list = &(const struct dmi_system_id[]) {
12645
		.dmi_id_list = &(const struct dmi_system_id[]) {
12162
			{
12646
			{
12163
				.callback = intel_dmi_reverse_brightness,
12647
				.callback = intel_dmi_reverse_brightness,
12164
				.ident = "NCR Corporation",
12648
				.ident = "NCR Corporation",
12165
				.matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
12649
				.matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
12166
					    DMI_MATCH(DMI_PRODUCT_NAME, ""),
12650
					    DMI_MATCH(DMI_PRODUCT_NAME, ""),
12167
				},
12651
				},
12168
			},
12652
			},
12169
			{ }  /* terminating entry */
12653
			{ }  /* terminating entry */
12170
		},
12654
		},
12171
		.hook = quirk_invert_brightness,
12655
		.hook = quirk_invert_brightness,
12172
	},
12656
	},
12173
};
12657
};
12174
 
12658
 
12175
static struct intel_quirk intel_quirks[] = {
12659
static struct intel_quirk intel_quirks[] = {
12176
	/* HP Mini needs pipe A force quirk (LP: #322104) */
12660
	/* HP Mini needs pipe A force quirk (LP: #322104) */
12177
	{ 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
12661
	{ 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
12178
 
12662
 
12179
	/* Toshiba Protege R-205, S-209 needs pipe A force quirk */
12663
	/* Toshiba Protege R-205, S-209 needs pipe A force quirk */
12180
	{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },
12664
	{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },
12181
 
12665
 
12182
	/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
12666
	/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
12183
	{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
12667
	{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
12184
 
12668
 
12185
	/* Lenovo U160 cannot use SSC on LVDS */
12669
	/* Lenovo U160 cannot use SSC on LVDS */
12186
	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
12670
	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
12187
 
12671
 
12188
	/* Sony Vaio Y cannot use SSC on LVDS */
12672
	/* Sony Vaio Y cannot use SSC on LVDS */
12189
	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
12673
	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
12190
 
12674
 
12191
	/* Acer Aspire 5734Z must invert backlight brightness */
12675
	/* Acer Aspire 5734Z must invert backlight brightness */
12192
	{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
12676
	{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
12193
 
12677
 
12194
	/* Acer/eMachines G725 */
12678
	/* Acer/eMachines G725 */
12195
	{ 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
12679
	{ 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
12196
 
12680
 
12197
	/* Acer/eMachines e725 */
12681
	/* Acer/eMachines e725 */
12198
	{ 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
12682
	{ 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
12199
 
12683
 
12200
	/* Acer/Packard Bell NCL20 */
12684
	/* Acer/Packard Bell NCL20 */
12201
	{ 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
12685
	{ 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
12202
 
12686
 
12203
	/* Acer Aspire 4736Z */
12687
	/* Acer Aspire 4736Z */
12204
	{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
12688
	{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
12205
 
12689
 
12206
	/* Acer Aspire 5336 */
12690
	/* Acer Aspire 5336 */
12207
	{ 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },
12691
	{ 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },
12208
 
12692
 
12209
	/* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
12693
	/* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
12210
	{ 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },
12694
	{ 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },
12211
 
12695
 
12212
	/* Acer C720 Chromebook (Core i3 4005U) */
12696
	/* Acer C720 Chromebook (Core i3 4005U) */
12213
	{ 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },
12697
	{ 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },
-
 
12698
 
-
 
12699
	/* Apple Macbook 2,1 (Core 2 T7400) */
-
 
12700
	{ 0x27a2, 0x8086, 0x7270, quirk_backlight_present },
12214
 
12701
 
12215
	/* Toshiba CB35 Chromebook (Celeron 2955U) */
12702
	/* Toshiba CB35 Chromebook (Celeron 2955U) */
12216
	{ 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },
12703
	{ 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },
12217
 
12704
 
12218
	/* HP Chromebook 14 (Celeron 2955U) */
12705
	/* HP Chromebook 14 (Celeron 2955U) */
12219
	{ 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },
12706
	{ 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },
12220
};
12707
};
12221
 
12708
 
12222
static void intel_init_quirks(struct drm_device *dev)
12709
static void intel_init_quirks(struct drm_device *dev)
12223
{
12710
{
12224
	struct pci_dev *d = dev->pdev;
12711
	struct pci_dev *d = dev->pdev;
12225
	int i;
12712
	int i;
12226
 
12713
 
12227
	for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
12714
	for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
12228
		struct intel_quirk *q = &intel_quirks[i];
12715
		struct intel_quirk *q = &intel_quirks[i];
12229
 
12716
 
12230
		if (d->device == q->device &&
12717
		if (d->device == q->device &&
12231
		    (d->subsystem_vendor == q->subsystem_vendor ||
12718
		    (d->subsystem_vendor == q->subsystem_vendor ||
12232
		     q->subsystem_vendor == PCI_ANY_ID) &&
12719
		     q->subsystem_vendor == PCI_ANY_ID) &&
12233
		    (d->subsystem_device == q->subsystem_device ||
12720
		    (d->subsystem_device == q->subsystem_device ||
12234
		     q->subsystem_device == PCI_ANY_ID))
12721
		     q->subsystem_device == PCI_ANY_ID))
12235
			q->hook(dev);
12722
			q->hook(dev);
12236
	}
12723
	}
12237
	for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
12724
	for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
12238
		if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
12725
		if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
12239
			intel_dmi_quirks[i].hook(dev);
12726
			intel_dmi_quirks[i].hook(dev);
12240
	}
12727
	}
12241
}
12728
}
12242
 
12729
 
12243
/* Disable the VGA plane that we never use */
12730
/* Disable the VGA plane that we never use */
12244
static void i915_disable_vga(struct drm_device *dev)
12731
static void i915_disable_vga(struct drm_device *dev)
12245
{
12732
{
12246
	struct drm_i915_private *dev_priv = dev->dev_private;
12733
	struct drm_i915_private *dev_priv = dev->dev_private;
12247
	u8 sr1;
12734
	u8 sr1;
12248
	u32 vga_reg = i915_vgacntrl_reg(dev);
12735
	u32 vga_reg = i915_vgacntrl_reg(dev);
12249
 
12736
 
12250
//	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
12737
//	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
12251
	outb(SR01, VGA_SR_INDEX);
12738
	outb(SR01, VGA_SR_INDEX);
12252
	sr1 = inb(VGA_SR_DATA);
12739
	sr1 = inb(VGA_SR_DATA);
12253
	outb(sr1 | 1<<5, VGA_SR_DATA);
12740
	outb(sr1 | 1<<5, VGA_SR_DATA);
12254
//	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
12741
//	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
12255
	udelay(300);
12742
	udelay(300);
12256
 
12743
 
-
 
12744
	/*
-
 
12745
	 * Fujitsu-Siemens Lifebook S6010 (830) has problems resuming
-
 
12746
	 * from S3 without preserving (some of?) the other bits.
-
 
12747
	 */
12257
	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
12748
	I915_WRITE(vga_reg, dev_priv->bios_vgacntr | VGA_DISP_DISABLE);
12258
	POSTING_READ(vga_reg);
12749
	POSTING_READ(vga_reg);
12259
}
12750
}
12260
 
12751
 
12261
void intel_modeset_init_hw(struct drm_device *dev)
12752
void intel_modeset_init_hw(struct drm_device *dev)
12262
{
12753
{
12263
	intel_prepare_ddi(dev);
12754
	intel_prepare_ddi(dev);
12264
 
12755
 
12265
	if (IS_VALLEYVIEW(dev))
12756
	if (IS_VALLEYVIEW(dev))
12266
		vlv_update_cdclk(dev);
12757
		vlv_update_cdclk(dev);
12267
 
12758
 
12268
	intel_init_clock_gating(dev);
12759
	intel_init_clock_gating(dev);
12269
 
-
 
12270
	intel_reset_dpio(dev);
-
 
12271
 
12760
 
12272
    intel_enable_gt_powersave(dev);
12761
    intel_enable_gt_powersave(dev);
12273
}
12762
}
12274
 
-
 
12275
void intel_modeset_suspend_hw(struct drm_device *dev)
-
 
12276
{
-
 
12277
	intel_suspend_hw(dev);
-
 
12278
}
-
 
12279
 
12763
 
12280
void intel_modeset_init(struct drm_device *dev)
12764
void intel_modeset_init(struct drm_device *dev)
12281
{
12765
{
12282
	struct drm_i915_private *dev_priv = dev->dev_private;
12766
	struct drm_i915_private *dev_priv = dev->dev_private;
12283
	int sprite, ret;
12767
	int sprite, ret;
12284
	enum pipe pipe;
12768
	enum pipe pipe;
12285
	struct intel_crtc *crtc;
12769
	struct intel_crtc *crtc;
12286
 
12770
 
12287
	drm_mode_config_init(dev);
12771
	drm_mode_config_init(dev);
12288
 
12772
 
12289
	dev->mode_config.min_width = 0;
12773
	dev->mode_config.min_width = 0;
12290
	dev->mode_config.min_height = 0;
12774
	dev->mode_config.min_height = 0;
12291
 
12775
 
12292
	dev->mode_config.preferred_depth = 24;
12776
	dev->mode_config.preferred_depth = 24;
12293
	dev->mode_config.prefer_shadow = 1;
12777
	dev->mode_config.prefer_shadow = 1;
12294
 
12778
 
12295
	dev->mode_config.funcs = &intel_mode_funcs;
12779
	dev->mode_config.funcs = &intel_mode_funcs;
12296
 
12780
 
12297
	intel_init_quirks(dev);
12781
	intel_init_quirks(dev);
12298
 
12782
 
12299
	intel_init_pm(dev);
12783
	intel_init_pm(dev);
12300
 
12784
 
12301
	if (INTEL_INFO(dev)->num_pipes == 0)
12785
	if (INTEL_INFO(dev)->num_pipes == 0)
12302
		return;
12786
		return;
12303
 
12787
 
12304
	intel_init_display(dev);
12788
	intel_init_display(dev);
12305
 
12789
 
12306
	if (IS_GEN2(dev)) {
12790
	if (IS_GEN2(dev)) {
12307
		dev->mode_config.max_width = 2048;
12791
		dev->mode_config.max_width = 2048;
12308
		dev->mode_config.max_height = 2048;
12792
		dev->mode_config.max_height = 2048;
12309
	} else if (IS_GEN3(dev)) {
12793
	} else if (IS_GEN3(dev)) {
12310
		dev->mode_config.max_width = 4096;
12794
		dev->mode_config.max_width = 4096;
12311
		dev->mode_config.max_height = 4096;
12795
		dev->mode_config.max_height = 4096;
12312
	} else {
12796
	} else {
12313
		dev->mode_config.max_width = 8192;
12797
		dev->mode_config.max_width = 8192;
12314
		dev->mode_config.max_height = 8192;
12798
		dev->mode_config.max_height = 8192;
12315
	}
12799
	}
12316
 
12800
 
12317
	if (IS_GEN2(dev)) {
12801
	if (IS_GEN2(dev)) {
12318
		dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH;
12802
		dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH;
12319
		dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT;
12803
		dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT;
12320
	} else {
12804
	} else {
12321
		dev->mode_config.cursor_width = MAX_CURSOR_WIDTH;
12805
		dev->mode_config.cursor_width = MAX_CURSOR_WIDTH;
12322
		dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT;
12806
		dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT;
12323
	}
12807
	}
12324
 
12808
 
12325
	dev->mode_config.fb_base = dev_priv->gtt.mappable_base;
12809
	dev->mode_config.fb_base = dev_priv->gtt.mappable_base;
12326
 
12810
 
12327
	DRM_DEBUG_KMS("%d display pipe%s available.\n",
12811
	DRM_DEBUG_KMS("%d display pipe%s available.\n",
12328
		      INTEL_INFO(dev)->num_pipes,
12812
		      INTEL_INFO(dev)->num_pipes,
12329
		      INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");
12813
		      INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");
12330
 
12814
 
12331
	for_each_pipe(pipe) {
12815
	for_each_pipe(dev_priv, pipe) {
12332
		intel_crtc_init(dev, pipe);
12816
		intel_crtc_init(dev, pipe);
12333
		for_each_sprite(pipe, sprite) {
12817
		for_each_sprite(pipe, sprite) {
12334
			ret = intel_plane_init(dev, pipe, sprite);
12818
			ret = intel_plane_init(dev, pipe, sprite);
12335
		if (ret)
12819
		if (ret)
12336
				DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n",
12820
				DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n",
12337
					      pipe_name(pipe), sprite_name(pipe, sprite), ret);
12821
					      pipe_name(pipe), sprite_name(pipe, sprite), ret);
12338
		}
12822
		}
12339
	}
12823
	}
12340
 
12824
 
12341
	intel_init_dpio(dev);
12825
	intel_init_dpio(dev);
12342
	intel_reset_dpio(dev);
-
 
12343
 
12826
 
12344
	intel_shared_dpll_init(dev);
12827
	intel_shared_dpll_init(dev);
-
 
12828
 
-
 
12829
	/* save the BIOS value before clobbering it */
12345
 
12830
	dev_priv->bios_vgacntr = I915_READ(i915_vgacntrl_reg(dev));
12346
	/* Just disable it once at startup */
12831
	/* Just disable it once at startup */
12347
	i915_disable_vga(dev);
12832
	i915_disable_vga(dev);
12348
	intel_setup_outputs(dev);
12833
	intel_setup_outputs(dev);
12349
 
12834
 
12350
	/* Just in case the BIOS is doing something questionable. */
12835
	/* Just in case the BIOS is doing something questionable. */
12351
	intel_disable_fbc(dev);
12836
	intel_disable_fbc(dev);
12352
 
12837
 
12353
	drm_modeset_lock_all(dev);
12838
	drm_modeset_lock_all(dev);
12354
	intel_modeset_setup_hw_state(dev, false);
12839
	intel_modeset_setup_hw_state(dev, false);
12355
	drm_modeset_unlock_all(dev);
12840
	drm_modeset_unlock_all(dev);
12356
 
12841
 
12357
	for_each_intel_crtc(dev, crtc) {
12842
	for_each_intel_crtc(dev, crtc) {
12358
		if (!crtc->active)
12843
		if (!crtc->active)
12359
			continue;
12844
			continue;
12360
 
12845
 
12361
		/*
12846
		/*
12362
		 * Note that reserving the BIOS fb up front prevents us
12847
		 * Note that reserving the BIOS fb up front prevents us
12363
		 * from stuffing other stolen allocations like the ring
12848
		 * from stuffing other stolen allocations like the ring
12364
		 * on top.  This prevents some ugliness at boot time, and
12849
		 * on top.  This prevents some ugliness at boot time, and
12365
		 * can even allow for smooth boot transitions if the BIOS
12850
		 * can even allow for smooth boot transitions if the BIOS
12366
		 * fb is large enough for the active pipe configuration.
12851
		 * fb is large enough for the active pipe configuration.
12367
		 */
12852
		 */
12368
		if (dev_priv->display.get_plane_config) {
12853
		if (dev_priv->display.get_plane_config) {
12369
			dev_priv->display.get_plane_config(crtc,
12854
			dev_priv->display.get_plane_config(crtc,
12370
							   &crtc->plane_config);
12855
							   &crtc->plane_config);
12371
			/*
12856
			/*
12372
			 * If the fb is shared between multiple heads, we'll
12857
			 * If the fb is shared between multiple heads, we'll
12373
			 * just get the first one.
12858
			 * just get the first one.
12374
			 */
12859
			 */
12375
			intel_find_plane_obj(crtc, &crtc->plane_config);
12860
			intel_find_plane_obj(crtc, &crtc->plane_config);
12376
		}
12861
		}
12377
	}
12862
	}
12378
}
12863
}
12379
 
12864
 
12380
static void intel_enable_pipe_a(struct drm_device *dev)
12865
static void intel_enable_pipe_a(struct drm_device *dev)
12381
{
12866
{
12382
	struct intel_connector *connector;
12867
	struct intel_connector *connector;
12383
	struct drm_connector *crt = NULL;
12868
	struct drm_connector *crt = NULL;
12384
	struct intel_load_detect_pipe load_detect_temp;
12869
	struct intel_load_detect_pipe load_detect_temp;
12385
	struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx;
12870
	struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx;
12386
 
12871
 
12387
	/* We can't just switch on pipe A; we need to set things up with a
12872
	/* We can't just switch on pipe A; we need to set things up with a
12388
	 * proper mode and output configuration. As a gross hack, enable pipe A
12873
	 * proper mode and output configuration. As a gross hack, enable pipe A
12389
	 * by enabling the load detect pipe once. */
12874
	 * by enabling the load detect pipe once. */
12390
	list_for_each_entry(connector,
12875
	list_for_each_entry(connector,
12391
			    &dev->mode_config.connector_list,
12876
			    &dev->mode_config.connector_list,
12392
			    base.head) {
12877
			    base.head) {
12393
		if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
12878
		if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
12394
			crt = &connector->base;
12879
			crt = &connector->base;
12395
			break;
12880
			break;
12396
		}
12881
		}
12397
	}
12882
	}
12398
 
12883
 
12399
	if (!crt)
12884
	if (!crt)
12400
		return;
12885
		return;
12401
 
12886
 
12402
	if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx))
12887
	if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx))
12403
		intel_release_load_detect_pipe(crt, &load_detect_temp);
12888
		intel_release_load_detect_pipe(crt, &load_detect_temp);
12404
}
12889
}
12405
 
12890
 
12406
static bool
12891
static bool
12407
intel_check_plane_mapping(struct intel_crtc *crtc)
12892
intel_check_plane_mapping(struct intel_crtc *crtc)
12408
{
12893
{
12409
	struct drm_device *dev = crtc->base.dev;
12894
	struct drm_device *dev = crtc->base.dev;
12410
	struct drm_i915_private *dev_priv = dev->dev_private;
12895
	struct drm_i915_private *dev_priv = dev->dev_private;
12411
	u32 reg, val;
12896
	u32 reg, val;
12412
 
12897
 
12413
	if (INTEL_INFO(dev)->num_pipes == 1)
12898
	if (INTEL_INFO(dev)->num_pipes == 1)
12414
		return true;
12899
		return true;
12415
 
12900
 
12416
	reg = DSPCNTR(!crtc->plane);
12901
	reg = DSPCNTR(!crtc->plane);
12417
	val = I915_READ(reg);
12902
	val = I915_READ(reg);
12418
 
12903
 
12419
	if ((val & DISPLAY_PLANE_ENABLE) &&
12904
	if ((val & DISPLAY_PLANE_ENABLE) &&
12420
	    (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
12905
	    (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
12421
		return false;
12906
		return false;
12422
 
12907
 
12423
	return true;
12908
	return true;
12424
}
12909
}
12425
 
12910
 
12426
static void intel_sanitize_crtc(struct intel_crtc *crtc)
12911
static void intel_sanitize_crtc(struct intel_crtc *crtc)
12427
{
12912
{
12428
	struct drm_device *dev = crtc->base.dev;
12913
	struct drm_device *dev = crtc->base.dev;
12429
	struct drm_i915_private *dev_priv = dev->dev_private;
12914
	struct drm_i915_private *dev_priv = dev->dev_private;
12430
	u32 reg;
12915
	u32 reg;
12431
 
12916
 
12432
	/* Clear any frame start delays left by the BIOS for debugging */
12917
	/* Clear any frame start delays left by the BIOS for debugging */
12433
	reg = PIPECONF(crtc->config.cpu_transcoder);
12918
	reg = PIPECONF(crtc->config.cpu_transcoder);
12434
	I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
12919
	I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
12435
 
12920
 
12436
	/* restore vblank interrupts to correct state */
12921
	/* restore vblank interrupts to correct state */
12437
	if (crtc->active)
12922
	if (crtc->active) {
-
 
12923
		update_scanline_offset(crtc);
12438
		drm_vblank_on(dev, crtc->pipe);
12924
		drm_vblank_on(dev, crtc->pipe);
12439
	else
12925
	} else
12440
		drm_vblank_off(dev, crtc->pipe);
12926
		drm_vblank_off(dev, crtc->pipe);
12441
 
12927
 
12442
	/* We need to sanitize the plane -> pipe mapping first because this will
12928
	/* We need to sanitize the plane -> pipe mapping first because this will
12443
	 * disable the crtc (and hence change the state) if it is wrong. Note
12929
	 * disable the crtc (and hence change the state) if it is wrong. Note
12444
	 * that gen4+ has a fixed plane -> pipe mapping.  */
12930
	 * that gen4+ has a fixed plane -> pipe mapping.  */
12445
	if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
12931
	if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
12446
		struct intel_connector *connector;
12932
		struct intel_connector *connector;
12447
		bool plane;
12933
		bool plane;
12448
 
12934
 
12449
		DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
12935
		DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
12450
			      crtc->base.base.id);
12936
			      crtc->base.base.id);
12451
 
12937
 
12452
		/* Pipe has the wrong plane attached and the plane is active.
12938
		/* Pipe has the wrong plane attached and the plane is active.
12453
		 * Temporarily change the plane mapping and disable everything
12939
		 * Temporarily change the plane mapping and disable everything
12454
		 * ...  */
12940
		 * ...  */
12455
		plane = crtc->plane;
12941
		plane = crtc->plane;
12456
		crtc->plane = !plane;
12942
		crtc->plane = !plane;
12457
		crtc->primary_enabled = true;
12943
		crtc->primary_enabled = true;
12458
		dev_priv->display.crtc_disable(&crtc->base);
12944
		dev_priv->display.crtc_disable(&crtc->base);
12459
		crtc->plane = plane;
12945
		crtc->plane = plane;
12460
 
12946
 
12461
		/* ... and break all links. */
12947
		/* ... and break all links. */
12462
		list_for_each_entry(connector, &dev->mode_config.connector_list,
12948
		list_for_each_entry(connector, &dev->mode_config.connector_list,
12463
				    base.head) {
12949
				    base.head) {
12464
			if (connector->encoder->base.crtc != &crtc->base)
12950
			if (connector->encoder->base.crtc != &crtc->base)
12465
				continue;
12951
				continue;
12466
 
12952
 
12467
			connector->base.dpms = DRM_MODE_DPMS_OFF;
12953
			connector->base.dpms = DRM_MODE_DPMS_OFF;
12468
			connector->base.encoder = NULL;
12954
			connector->base.encoder = NULL;
12469
		}
12955
		}
12470
		/* multiple connectors may have the same encoder:
12956
		/* multiple connectors may have the same encoder:
12471
		 *  handle them and break crtc link separately */
12957
		 *  handle them and break crtc link separately */
12472
		list_for_each_entry(connector, &dev->mode_config.connector_list,
12958
		list_for_each_entry(connector, &dev->mode_config.connector_list,
12473
				    base.head)
12959
				    base.head)
12474
			if (connector->encoder->base.crtc == &crtc->base) {
12960
			if (connector->encoder->base.crtc == &crtc->base) {
12475
				connector->encoder->base.crtc = NULL;
12961
				connector->encoder->base.crtc = NULL;
12476
				connector->encoder->connectors_active = false;
12962
				connector->encoder->connectors_active = false;
12477
		}
12963
		}
12478
 
12964
 
12479
		WARN_ON(crtc->active);
12965
		WARN_ON(crtc->active);
12480
		crtc->base.enabled = false;
12966
		crtc->base.enabled = false;
12481
	}
12967
	}
12482
 
12968
 
12483
	if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
12969
	if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
12484
	    crtc->pipe == PIPE_A && !crtc->active) {
12970
	    crtc->pipe == PIPE_A && !crtc->active) {
12485
		/* BIOS forgot to enable pipe A, this mostly happens after
12971
		/* BIOS forgot to enable pipe A, this mostly happens after
12486
		 * resume. Force-enable the pipe to fix this, the update_dpms
12972
		 * resume. Force-enable the pipe to fix this, the update_dpms
12487
		 * call below will restore the pipe to the right state, but leave
12973
		 * call below will restore the pipe to the right state, but leave
12488
		 * the required bits on. */
12974
		 * the required bits on. */
12489
		intel_enable_pipe_a(dev);
12975
		intel_enable_pipe_a(dev);
12490
	}
12976
	}
12491
 
12977
 
12492
	/* Adjust the state of the output pipe according to whether we
12978
	/* Adjust the state of the output pipe according to whether we
12493
	 * have active connectors/encoders. */
12979
	 * have active connectors/encoders. */
12494
	intel_crtc_update_dpms(&crtc->base);
12980
	intel_crtc_update_dpms(&crtc->base);
12495
 
12981
 
12496
	if (crtc->active != crtc->base.enabled) {
12982
	if (crtc->active != crtc->base.enabled) {
12497
		struct intel_encoder *encoder;
12983
		struct intel_encoder *encoder;
12498
 
12984
 
12499
		/* This can happen either due to bugs in the get_hw_state
12985
		/* This can happen either due to bugs in the get_hw_state
12500
		 * functions or because the pipe is force-enabled due to the
12986
		 * functions or because the pipe is force-enabled due to the
12501
		 * pipe A quirk. */
12987
		 * pipe A quirk. */
12502
		DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n",
12988
		DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n",
12503
			      crtc->base.base.id,
12989
			      crtc->base.base.id,
12504
			      crtc->base.enabled ? "enabled" : "disabled",
12990
			      crtc->base.enabled ? "enabled" : "disabled",
12505
			      crtc->active ? "enabled" : "disabled");
12991
			      crtc->active ? "enabled" : "disabled");
12506
 
12992
 
12507
		crtc->base.enabled = crtc->active;
12993
		crtc->base.enabled = crtc->active;
12508
 
12994
 
12509
		/* Because we only establish the connector -> encoder ->
12995
		/* Because we only establish the connector -> encoder ->
12510
		 * crtc links if something is active, this means the
12996
		 * crtc links if something is active, this means the
12511
		 * crtc is now deactivated. Break the links. connector
12997
		 * crtc is now deactivated. Break the links. connector
12512
		 * -> encoder links are only established when things are
12998
		 * -> encoder links are only established when things are
12513
		 *  actually up, hence no need to break them. */
12999
		 *  actually up, hence no need to break them. */
12514
		WARN_ON(crtc->active);
13000
		WARN_ON(crtc->active);
12515
 
13001
 
12516
		for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
13002
		for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
12517
			WARN_ON(encoder->connectors_active);
13003
			WARN_ON(encoder->connectors_active);
12518
			encoder->base.crtc = NULL;
13004
			encoder->base.crtc = NULL;
12519
		}
13005
		}
12520
	}
13006
	}
12521
 
13007
 
12522
	if (crtc->active || IS_VALLEYVIEW(dev) || INTEL_INFO(dev)->gen < 5) {
13008
	if (crtc->active || HAS_GMCH_DISPLAY(dev)) {
12523
		/*
13009
		/*
12524
		 * We start out with underrun reporting disabled to avoid races.
13010
		 * We start out with underrun reporting disabled to avoid races.
12525
		 * For correct bookkeeping mark this on active crtcs.
13011
		 * For correct bookkeeping mark this on active crtcs.
12526
		 *
13012
		 *
12527
		 * Also on gmch platforms we don't have any hardware bits to
13013
		 * Also on gmch platforms we don't have any hardware bits to
12528
		 * disable the underrun reporting. Which means we need to start
13014
		 * disable the underrun reporting. Which means we need to start
12529
		 * out with underrun reporting disabled also on inactive pipes,
13015
		 * out with underrun reporting disabled also on inactive pipes,
12530
		 * since otherwise we'll complain about the garbage we read when
13016
		 * since otherwise we'll complain about the garbage we read when
12531
		 * e.g. coming up after runtime pm.
13017
		 * e.g. coming up after runtime pm.
12532
		 *
13018
		 *
12533
		 * No protection against concurrent access is required - at
13019
		 * No protection against concurrent access is required - at
12534
		 * worst a fifo underrun happens which also sets this to false.
13020
		 * worst a fifo underrun happens which also sets this to false.
12535
		 */
13021
		 */
12536
		crtc->cpu_fifo_underrun_disabled = true;
13022
		crtc->cpu_fifo_underrun_disabled = true;
12537
		crtc->pch_fifo_underrun_disabled = true;
13023
		crtc->pch_fifo_underrun_disabled = true;
12538
 
-
 
12539
		update_scanline_offset(crtc);
-
 
12540
	}
13024
	}
12541
}
13025
}
12542
 
13026
 
12543
static void intel_sanitize_encoder(struct intel_encoder *encoder)
13027
static void intel_sanitize_encoder(struct intel_encoder *encoder)
12544
{
13028
{
12545
	struct intel_connector *connector;
13029
	struct intel_connector *connector;
12546
	struct drm_device *dev = encoder->base.dev;
13030
	struct drm_device *dev = encoder->base.dev;
12547
 
13031
 
12548
	/* We need to check both for a crtc link (meaning that the
13032
	/* We need to check both for a crtc link (meaning that the
12549
	 * encoder is active and trying to read from a pipe) and the
13033
	 * encoder is active and trying to read from a pipe) and the
12550
	 * pipe itself being active. */
13034
	 * pipe itself being active. */
12551
	bool has_active_crtc = encoder->base.crtc &&
13035
	bool has_active_crtc = encoder->base.crtc &&
12552
		to_intel_crtc(encoder->base.crtc)->active;
13036
		to_intel_crtc(encoder->base.crtc)->active;
12553
 
13037
 
12554
	if (encoder->connectors_active && !has_active_crtc) {
13038
	if (encoder->connectors_active && !has_active_crtc) {
12555
		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
13039
		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
12556
			      encoder->base.base.id,
13040
			      encoder->base.base.id,
12557
			      encoder->base.name);
13041
			      encoder->base.name);
12558
 
13042
 
12559
		/* Connector is active, but has no active pipe. This is
13043
		/* Connector is active, but has no active pipe. This is
12560
		 * fallout from our resume register restoring. Disable
13044
		 * fallout from our resume register restoring. Disable
12561
		 * the encoder manually again. */
13045
		 * the encoder manually again. */
12562
		if (encoder->base.crtc) {
13046
		if (encoder->base.crtc) {
12563
			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
13047
			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
12564
				      encoder->base.base.id,
13048
				      encoder->base.base.id,
12565
				      encoder->base.name);
13049
				      encoder->base.name);
12566
			encoder->disable(encoder);
13050
			encoder->disable(encoder);
12567
			if (encoder->post_disable)
13051
			if (encoder->post_disable)
12568
				encoder->post_disable(encoder);
13052
				encoder->post_disable(encoder);
12569
		}
13053
		}
12570
		encoder->base.crtc = NULL;
13054
		encoder->base.crtc = NULL;
12571
		encoder->connectors_active = false;
13055
		encoder->connectors_active = false;
12572
 
13056
 
12573
		/* Inconsistent output/port/pipe state happens presumably due to
13057
		/* Inconsistent output/port/pipe state happens presumably due to
12574
		 * a bug in one of the get_hw_state functions. Or someplace else
13058
		 * a bug in one of the get_hw_state functions. Or someplace else
12575
		 * in our code, like the register restore mess on resume. Clamp
13059
		 * in our code, like the register restore mess on resume. Clamp
12576
		 * things to off as a safer default. */
13060
		 * things to off as a safer default. */
12577
		list_for_each_entry(connector,
13061
		list_for_each_entry(connector,
12578
				    &dev->mode_config.connector_list,
13062
				    &dev->mode_config.connector_list,
12579
				    base.head) {
13063
				    base.head) {
12580
			if (connector->encoder != encoder)
13064
			if (connector->encoder != encoder)
12581
				continue;
13065
				continue;
12582
			connector->base.dpms = DRM_MODE_DPMS_OFF;
13066
			connector->base.dpms = DRM_MODE_DPMS_OFF;
12583
			connector->base.encoder = NULL;
13067
			connector->base.encoder = NULL;
12584
		}
13068
		}
12585
	}
13069
	}
12586
	/* Enabled encoders without active connectors will be fixed in
13070
	/* Enabled encoders without active connectors will be fixed in
12587
	 * the crtc fixup. */
13071
	 * the crtc fixup. */
12588
}
13072
}
12589
 
13073
 
12590
void i915_redisable_vga_power_on(struct drm_device *dev)
13074
void i915_redisable_vga_power_on(struct drm_device *dev)
12591
{
13075
{
12592
	struct drm_i915_private *dev_priv = dev->dev_private;
13076
	struct drm_i915_private *dev_priv = dev->dev_private;
12593
	u32 vga_reg = i915_vgacntrl_reg(dev);
13077
	u32 vga_reg = i915_vgacntrl_reg(dev);
12594
 
13078
 
12595
	if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
13079
	if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
12596
		DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
13080
		DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
12597
		i915_disable_vga(dev);
13081
		i915_disable_vga(dev);
12598
	}
13082
	}
12599
}
13083
}
12600
 
13084
 
12601
void i915_redisable_vga(struct drm_device *dev)
13085
void i915_redisable_vga(struct drm_device *dev)
12602
{
13086
{
12603
	struct drm_i915_private *dev_priv = dev->dev_private;
13087
	struct drm_i915_private *dev_priv = dev->dev_private;
12604
 
13088
 
12605
	/* This function can be called either from intel_modeset_setup_hw_state or
13089
	/* This function can be called either from intel_modeset_setup_hw_state or
12606
	 * at a very early point in our resume sequence, where the power well
13090
	 * at a very early point in our resume sequence, where the power well
12607
	 * structures are not yet restored. Since this function is at a very
13091
	 * structures are not yet restored. Since this function is at a very
12608
	 * paranoid "someone might have enabled VGA while we were not looking"
13092
	 * paranoid "someone might have enabled VGA while we were not looking"
12609
	 * level, just check if the power well is enabled instead of trying to
13093
	 * level, just check if the power well is enabled instead of trying to
12610
	 * follow the "don't touch the power well if we don't need it" policy
13094
	 * follow the "don't touch the power well if we don't need it" policy
12611
	 * the rest of the driver uses. */
13095
	 * the rest of the driver uses. */
12612
	if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_VGA))
13096
	if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_VGA))
12613
		return;
13097
		return;
12614
 
13098
 
12615
	i915_redisable_vga_power_on(dev);
13099
	i915_redisable_vga_power_on(dev);
12616
}
13100
}
12617
 
13101
 
12618
static bool primary_get_hw_state(struct intel_crtc *crtc)
13102
static bool primary_get_hw_state(struct intel_crtc *crtc)
12619
{
13103
{
12620
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
13104
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
12621
 
13105
 
12622
	if (!crtc->active)
13106
	if (!crtc->active)
12623
		return false;
13107
		return false;
12624
 
13108
 
12625
	return I915_READ(DSPCNTR(crtc->plane)) & DISPLAY_PLANE_ENABLE;
13109
	return I915_READ(DSPCNTR(crtc->plane)) & DISPLAY_PLANE_ENABLE;
12626
}
13110
}
12627
 
13111
 
12628
static void intel_modeset_readout_hw_state(struct drm_device *dev)
13112
static void intel_modeset_readout_hw_state(struct drm_device *dev)
12629
{
13113
{
12630
	struct drm_i915_private *dev_priv = dev->dev_private;
13114
	struct drm_i915_private *dev_priv = dev->dev_private;
12631
	enum pipe pipe;
13115
	enum pipe pipe;
12632
	struct intel_crtc *crtc;
13116
	struct intel_crtc *crtc;
12633
	struct intel_encoder *encoder;
13117
	struct intel_encoder *encoder;
12634
	struct intel_connector *connector;
13118
	struct intel_connector *connector;
12635
	int i;
13119
	int i;
12636
 
13120
 
12637
	for_each_intel_crtc(dev, crtc) {
13121
	for_each_intel_crtc(dev, crtc) {
12638
		memset(&crtc->config, 0, sizeof(crtc->config));
13122
		memset(&crtc->config, 0, sizeof(crtc->config));
12639
 
13123
 
12640
		crtc->config.quirks |= PIPE_CONFIG_QUIRK_INHERITED_MODE;
13124
		crtc->config.quirks |= PIPE_CONFIG_QUIRK_INHERITED_MODE;
12641
 
13125
 
12642
		crtc->active = dev_priv->display.get_pipe_config(crtc,
13126
		crtc->active = dev_priv->display.get_pipe_config(crtc,
12643
								 &crtc->config);
13127
								 &crtc->config);
12644
 
13128
 
12645
		crtc->base.enabled = crtc->active;
13129
		crtc->base.enabled = crtc->active;
12646
		crtc->primary_enabled = primary_get_hw_state(crtc);
13130
		crtc->primary_enabled = primary_get_hw_state(crtc);
12647
 
13131
 
12648
		DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
13132
		DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
12649
			      crtc->base.base.id,
13133
			      crtc->base.base.id,
12650
			      crtc->active ? "enabled" : "disabled");
13134
			      crtc->active ? "enabled" : "disabled");
12651
	}
13135
	}
12652
 
13136
 
12653
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
13137
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
12654
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
13138
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
12655
 
13139
 
-
 
13140
		pll->on = pll->get_hw_state(dev_priv, pll,
12656
		pll->on = pll->get_hw_state(dev_priv, pll, &pll->hw_state);
13141
					    &pll->config.hw_state);
-
 
13142
		pll->active = 0;
12657
		pll->active = 0;
13143
		pll->config.crtc_mask = 0;
12658
		for_each_intel_crtc(dev, crtc) {
13144
		for_each_intel_crtc(dev, crtc) {
12659
			if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
13145
			if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll) {
-
 
13146
				pll->active++;
-
 
13147
				pll->config.crtc_mask |= 1 << crtc->pipe;
12660
				pll->active++;
13148
			}
12661
		}
-
 
12662
		pll->refcount = pll->active;
13149
		}
12663
 
13150
 
12664
		DRM_DEBUG_KMS("%s hw state readout: refcount %i, on %i\n",
13151
		DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
12665
			      pll->name, pll->refcount, pll->on);
13152
			      pll->name, pll->config.crtc_mask, pll->on);
12666
 
13153
 
12667
		if (pll->refcount)
13154
		if (pll->config.crtc_mask)
12668
			intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
13155
			intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
12669
	}
13156
	}
12670
 
13157
 
12671
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
-
 
12672
			    base.head) {
13158
	for_each_intel_encoder(dev, encoder) {
12673
		pipe = 0;
13159
		pipe = 0;
12674
 
13160
 
12675
		if (encoder->get_hw_state(encoder, &pipe)) {
13161
		if (encoder->get_hw_state(encoder, &pipe)) {
12676
			crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
13162
			crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
12677
			encoder->base.crtc = &crtc->base;
13163
			encoder->base.crtc = &crtc->base;
12678
				encoder->get_config(encoder, &crtc->config);
13164
				encoder->get_config(encoder, &crtc->config);
12679
		} else {
13165
		} else {
12680
			encoder->base.crtc = NULL;
13166
			encoder->base.crtc = NULL;
12681
		}
13167
		}
12682
 
13168
 
12683
		encoder->connectors_active = false;
13169
		encoder->connectors_active = false;
12684
		DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
13170
		DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
12685
			      encoder->base.base.id,
13171
			      encoder->base.base.id,
12686
			      encoder->base.name,
13172
			      encoder->base.name,
12687
			      encoder->base.crtc ? "enabled" : "disabled",
13173
			      encoder->base.crtc ? "enabled" : "disabled",
12688
			      pipe_name(pipe));
13174
			      pipe_name(pipe));
12689
	}
13175
	}
12690
 
13176
 
12691
	list_for_each_entry(connector, &dev->mode_config.connector_list,
13177
	list_for_each_entry(connector, &dev->mode_config.connector_list,
12692
			    base.head) {
13178
			    base.head) {
12693
		if (connector->get_hw_state(connector)) {
13179
		if (connector->get_hw_state(connector)) {
12694
			connector->base.dpms = DRM_MODE_DPMS_ON;
13180
			connector->base.dpms = DRM_MODE_DPMS_ON;
12695
			connector->encoder->connectors_active = true;
13181
			connector->encoder->connectors_active = true;
12696
			connector->base.encoder = &connector->encoder->base;
13182
			connector->base.encoder = &connector->encoder->base;
12697
		} else {
13183
		} else {
12698
			connector->base.dpms = DRM_MODE_DPMS_OFF;
13184
			connector->base.dpms = DRM_MODE_DPMS_OFF;
12699
			connector->base.encoder = NULL;
13185
			connector->base.encoder = NULL;
12700
		}
13186
		}
12701
		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
13187
		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
12702
			      connector->base.base.id,
13188
			      connector->base.base.id,
12703
			      connector->base.name,
13189
			      connector->base.name,
12704
			      connector->base.encoder ? "enabled" : "disabled");
13190
			      connector->base.encoder ? "enabled" : "disabled");
12705
	}
13191
	}
12706
}
13192
}
12707
 
13193
 
12708
/* Scan out the current hw modeset state, sanitize it and map it into the drm
13194
/* Scan out the current hw modeset state, sanitize it and map it into the drm
12709
 * and i915 state tracking structures. */
13195
 * and i915 state tracking structures. */
12710
void intel_modeset_setup_hw_state(struct drm_device *dev,
13196
void intel_modeset_setup_hw_state(struct drm_device *dev,
12711
				  bool force_restore)
13197
				  bool force_restore)
12712
{
13198
{
12713
	struct drm_i915_private *dev_priv = dev->dev_private;
13199
	struct drm_i915_private *dev_priv = dev->dev_private;
12714
	enum pipe pipe;
13200
	enum pipe pipe;
12715
	struct intel_crtc *crtc;
13201
	struct intel_crtc *crtc;
12716
	struct intel_encoder *encoder;
13202
	struct intel_encoder *encoder;
12717
	int i;
13203
	int i;
12718
 
13204
 
12719
	intel_modeset_readout_hw_state(dev);
13205
	intel_modeset_readout_hw_state(dev);
12720
 
13206
 
12721
	/*
13207
	/*
12722
	 * Now that we have the config, copy it to each CRTC struct.
13208
	 * Now that we have the config, copy it to each CRTC struct.
12723
	 * Note that this could go away if we move to using crtc_config
13209
	 * Note that this could go away if we move to using crtc_config
12724
	 * checking everywhere.
13210
	 * checking everywhere.
12725
	 */
13211
	 */
12726
	for_each_intel_crtc(dev, crtc) {
13212
	for_each_intel_crtc(dev, crtc) {
12727
		if (crtc->active && i915.fastboot) {
13213
		if (crtc->active && i915.fastboot) {
12728
			intel_mode_from_pipe_config(&crtc->base.mode, &crtc->config);
13214
			intel_mode_from_pipe_config(&crtc->base.mode, &crtc->config);
12729
			DRM_DEBUG_KMS("[CRTC:%d] found active mode: ",
13215
			DRM_DEBUG_KMS("[CRTC:%d] found active mode: ",
12730
				      crtc->base.base.id);
13216
				      crtc->base.base.id);
12731
			drm_mode_debug_printmodeline(&crtc->base.mode);
13217
			drm_mode_debug_printmodeline(&crtc->base.mode);
12732
		}
13218
		}
12733
	}
13219
	}
12734
 
13220
 
12735
	/* HW state is read out, now we need to sanitize this mess. */
13221
	/* HW state is read out, now we need to sanitize this mess. */
12736
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
13222
	for_each_intel_encoder(dev, encoder) {
12737
			    base.head) {
-
 
12738
		intel_sanitize_encoder(encoder);
13223
		intel_sanitize_encoder(encoder);
12739
	}
13224
	}
12740
 
13225
 
12741
	for_each_pipe(pipe) {
13226
	for_each_pipe(dev_priv, pipe) {
12742
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
13227
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
12743
		intel_sanitize_crtc(crtc);
13228
		intel_sanitize_crtc(crtc);
12744
		intel_dump_pipe_config(crtc, &crtc->config, "[setup_hw_state]");
13229
		intel_dump_pipe_config(crtc, &crtc->config, "[setup_hw_state]");
12745
	}
13230
	}
12746
 
13231
 
12747
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
13232
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
12748
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
13233
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
12749
 
13234
 
12750
		if (!pll->on || pll->active)
13235
		if (!pll->on || pll->active)
12751
			continue;
13236
			continue;
12752
 
13237
 
12753
		DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name);
13238
		DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name);
12754
 
13239
 
12755
		pll->disable(dev_priv, pll);
13240
		pll->disable(dev_priv, pll);
12756
		pll->on = false;
13241
		pll->on = false;
12757
	}
13242
	}
-
 
13243
 
-
 
13244
	if (IS_GEN9(dev))
12758
 
13245
		skl_wm_get_hw_state(dev);
12759
	if (HAS_PCH_SPLIT(dev))
13246
	else if (HAS_PCH_SPLIT(dev))
12760
		ilk_wm_get_hw_state(dev);
13247
		ilk_wm_get_hw_state(dev);
12761
 
13248
 
12762
	if (force_restore) {
13249
	if (force_restore) {
12763
		i915_redisable_vga(dev);
13250
		i915_redisable_vga(dev);
12764
 
13251
 
12765
		/*
13252
		/*
12766
		 * We need to use raw interfaces for restoring state to avoid
13253
		 * We need to use raw interfaces for restoring state to avoid
12767
		 * checking (bogus) intermediate states.
13254
		 * checking (bogus) intermediate states.
12768
		 */
13255
		 */
12769
		for_each_pipe(pipe) {
13256
		for_each_pipe(dev_priv, pipe) {
12770
			struct drm_crtc *crtc =
13257
			struct drm_crtc *crtc =
12771
				dev_priv->pipe_to_crtc_mapping[pipe];
13258
				dev_priv->pipe_to_crtc_mapping[pipe];
12772
 
13259
 
12773
			__intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y,
13260
			intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y,
12774
					 crtc->primary->fb);
13261
					 crtc->primary->fb);
12775
		}
13262
		}
12776
	} else {
13263
	} else {
12777
	intel_modeset_update_staged_output_state(dev);
13264
	intel_modeset_update_staged_output_state(dev);
12778
	}
13265
	}
12779
 
13266
 
12780
	intel_modeset_check_state(dev);
13267
	intel_modeset_check_state(dev);
12781
}
13268
}
12782
 
13269
 
12783
void intel_modeset_gem_init(struct drm_device *dev)
13270
void intel_modeset_gem_init(struct drm_device *dev)
12784
{
13271
{
-
 
13272
	struct drm_i915_private *dev_priv = dev->dev_private;
12785
	struct drm_crtc *c;
13273
	struct drm_crtc *c;
12786
	struct drm_i915_gem_object *obj;
13274
	struct drm_i915_gem_object *obj;
12787
 
13275
 
12788
	mutex_lock(&dev->struct_mutex);
13276
	mutex_lock(&dev->struct_mutex);
12789
	intel_init_gt_powersave(dev);
13277
	intel_init_gt_powersave(dev);
12790
	mutex_unlock(&dev->struct_mutex);
13278
	mutex_unlock(&dev->struct_mutex);
-
 
13279
 
-
 
13280
	/*
-
 
13281
	 * There may be no VBT; and if the BIOS enabled SSC we can
-
 
13282
	 * just keep using it to avoid unnecessary flicker.  Whereas if the
-
 
13283
	 * BIOS isn't using it, don't assume it will work even if the VBT
-
 
13284
	 * indicates as much.
-
 
13285
	 */
-
 
13286
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
-
 
13287
		dev_priv->vbt.lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
-
 
13288
						DREF_SSC1_ENABLE);
12791
 
13289
 
12792
	intel_modeset_init_hw(dev);
13290
	intel_modeset_init_hw(dev);
12793
 
13291
 
12794
//   intel_setup_overlay(dev);
13292
//   intel_setup_overlay(dev);
12795
 
13293
 
12796
	/*
13294
	/*
12797
	 * Make sure any fbs we allocated at startup are properly
13295
	 * Make sure any fbs we allocated at startup are properly
12798
	 * pinned & fenced.  When we do the allocation it's too early
13296
	 * pinned & fenced.  When we do the allocation it's too early
12799
	 * for this.
13297
	 * for this.
12800
	 */
13298
	 */
12801
	mutex_lock(&dev->struct_mutex);
13299
	mutex_lock(&dev->struct_mutex);
12802
	for_each_crtc(dev, c) {
13300
	for_each_crtc(dev, c) {
12803
		obj = intel_fb_obj(c->primary->fb);
13301
		obj = intel_fb_obj(c->primary->fb);
12804
		if (obj == NULL)
13302
		if (obj == NULL)
12805
			continue;
13303
			continue;
12806
 
13304
 
-
 
13305
		if (intel_pin_and_fence_fb_obj(c->primary,
-
 
13306
					       c->primary->fb,
12807
		if (intel_pin_and_fence_fb_obj(dev, obj, NULL)) {
13307
					       NULL)) {
12808
			DRM_ERROR("failed to pin boot fb on pipe %d\n",
13308
			DRM_ERROR("failed to pin boot fb on pipe %d\n",
12809
				  to_intel_crtc(c)->pipe);
13309
				  to_intel_crtc(c)->pipe);
12810
			drm_framebuffer_unreference(c->primary->fb);
13310
			drm_framebuffer_unreference(c->primary->fb);
12811
			c->primary->fb = NULL;
13311
			c->primary->fb = NULL;
12812
		}
13312
		}
12813
	}
13313
	}
12814
	mutex_unlock(&dev->struct_mutex);
13314
	mutex_unlock(&dev->struct_mutex);
12815
}
13315
}
12816
 
13316
 
12817
void intel_connector_unregister(struct intel_connector *intel_connector)
13317
void intel_connector_unregister(struct intel_connector *intel_connector)
12818
{
13318
{
12819
	struct drm_connector *connector = &intel_connector->base;
13319
	struct drm_connector *connector = &intel_connector->base;
12820
 
13320
 
12821
	intel_panel_destroy_backlight(connector);
13321
	intel_panel_destroy_backlight(connector);
12822
	drm_connector_unregister(connector);
13322
	drm_connector_unregister(connector);
12823
}
13323
}
12824
 
13324
 
12825
void intel_modeset_cleanup(struct drm_device *dev)
13325
void intel_modeset_cleanup(struct drm_device *dev)
12826
{
13326
{
12827
#if 0
13327
#if 0
12828
	struct drm_i915_private *dev_priv = dev->dev_private;
13328
	struct drm_i915_private *dev_priv = dev->dev_private;
12829
	struct drm_connector *connector;
13329
	struct drm_connector *connector;
-
 
13330
 
-
 
13331
	intel_disable_gt_powersave(dev);
-
 
13332
 
-
 
13333
	intel_backlight_unregister(dev);
12830
 
13334
 
12831
	/*
13335
	/*
12832
	 * Disable interrupts and polling first to avoid creating havoc.
13336
	 * Disable interrupts and polling first to avoid creating havoc.
12833
	 * Too much stuff here (turning off rps, connectors, ...) would
13337
	 * Too much stuff here (turning off connectors, ...) would
12834
	 * experience fancy races otherwise.
13338
	 * experience fancy races otherwise.
12835
	 */
13339
	 */
12836
	drm_irq_uninstall(dev);
-
 
12837
	intel_hpd_cancel_work(dev_priv);
-
 
12838
	dev_priv->pm._irqs_disabled = true;
13340
	intel_irq_uninstall(dev_priv);
12839
 
13341
 
12840
	/*
13342
	/*
12841
	 * Due to the hpd irq storm handling, the hotplug work can re-arm the
13343
	 * Due to the hpd irq storm handling, the hotplug work can re-arm the
12842
	 * poll handlers. Hence disable polling after hpd handling is shut down.
13344
	 * poll handlers. Hence disable polling after hpd handling is shut down.
12843
	 */
13345
	 */
12844
	drm_kms_helper_poll_fini(dev);
13346
	drm_kms_helper_poll_fini(dev);
12845
 
13347
 
12846
	mutex_lock(&dev->struct_mutex);
13348
	mutex_lock(&dev->struct_mutex);
12847
 
13349
 
12848
	intel_unregister_dsm_handler();
13350
	intel_unregister_dsm_handler();
12849
 
13351
 
12850
	intel_disable_fbc(dev);
13352
	intel_disable_fbc(dev);
12851
 
-
 
12852
	intel_disable_gt_powersave(dev);
-
 
12853
 
13353
 
12854
	ironlake_teardown_rc6(dev);
13354
	ironlake_teardown_rc6(dev);
12855
 
13355
 
12856
	mutex_unlock(&dev->struct_mutex);
13356
	mutex_unlock(&dev->struct_mutex);
12857
 
13357
 
12858
	/* flush any delayed tasks or pending work */
13358
	/* flush any delayed tasks or pending work */
12859
	flush_scheduled_work();
13359
	flush_scheduled_work();
12860
 
13360
 
12861
	/* destroy the backlight and sysfs files before encoders/connectors */
13361
	/* destroy the backlight and sysfs files before encoders/connectors */
12862
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
13362
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
12863
		struct intel_connector *intel_connector;
13363
		struct intel_connector *intel_connector;
12864
 
13364
 
12865
		intel_connector = to_intel_connector(connector);
13365
		intel_connector = to_intel_connector(connector);
12866
		intel_connector->unregister(intel_connector);
13366
		intel_connector->unregister(intel_connector);
12867
	}
13367
	}
12868
 
13368
 
12869
	drm_mode_config_cleanup(dev);
13369
	drm_mode_config_cleanup(dev);
12870
 
13370
 
12871
	intel_cleanup_overlay(dev);
13371
	intel_cleanup_overlay(dev);
12872
 
13372
 
12873
	mutex_lock(&dev->struct_mutex);
13373
	mutex_lock(&dev->struct_mutex);
12874
	intel_cleanup_gt_powersave(dev);
13374
	intel_cleanup_gt_powersave(dev);
12875
	mutex_unlock(&dev->struct_mutex);
13375
	mutex_unlock(&dev->struct_mutex);
12876
#endif
13376
#endif
12877
}
13377
}
12878
 
13378
 
12879
/*
13379
/*
12880
 * Return which encoder is currently attached to the connector.
13380
 * Return which encoder is currently attached to the connector.
12881
 */
13381
 */
12882
struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
13382
struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
12883
{
13383
{
12884
	return &intel_attached_encoder(connector)->base;
13384
	return &intel_attached_encoder(connector)->base;
12885
}
13385
}
12886
 
13386
 
12887
void intel_connector_attach_encoder(struct intel_connector *connector,
13387
void intel_connector_attach_encoder(struct intel_connector *connector,
12888
				    struct intel_encoder *encoder)
13388
				    struct intel_encoder *encoder)
12889
{
13389
{
12890
	connector->encoder = encoder;
13390
	connector->encoder = encoder;
12891
	drm_mode_connector_attach_encoder(&connector->base,
13391
	drm_mode_connector_attach_encoder(&connector->base,
12892
					  &encoder->base);
13392
					  &encoder->base);
12893
}
13393
}
12894
 
13394
 
12895
/*
13395
/*
12896
 * set vga decode state - true == enable VGA decode
13396
 * set vga decode state - true == enable VGA decode
12897
 */
13397
 */
12898
int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
13398
int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
12899
{
13399
{
12900
	struct drm_i915_private *dev_priv = dev->dev_private;
13400
	struct drm_i915_private *dev_priv = dev->dev_private;
12901
	unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
13401
	unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
12902
	u16 gmch_ctrl;
13402
	u16 gmch_ctrl;
12903
 
13403
 
12904
	if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
13404
	if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
12905
		DRM_ERROR("failed to read control word\n");
13405
		DRM_ERROR("failed to read control word\n");
12906
		return -EIO;
13406
		return -EIO;
12907
	}
13407
	}
12908
 
13408
 
12909
	if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
13409
	if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
12910
		return 0;
13410
		return 0;
12911
 
13411
 
12912
	if (state)
13412
	if (state)
12913
		gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
13413
		gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
12914
	else
13414
	else
12915
		gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
13415
		gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
12916
 
13416
 
12917
	if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
13417
	if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
12918
		DRM_ERROR("failed to write control word\n");
13418
		DRM_ERROR("failed to write control word\n");
12919
		return -EIO;
13419
		return -EIO;
12920
	}
13420
	}
12921
 
13421
 
12922
	return 0;
13422
	return 0;
12923
}
13423
}
12924
 
13424
 
12925
#ifdef CONFIG_DEBUG_FS
13425
#ifdef CONFIG_DEBUG_FS
12926
 
13426
 
12927
struct intel_display_error_state {
13427
struct intel_display_error_state {
12928
 
13428
 
12929
	u32 power_well_driver;
13429
	u32 power_well_driver;
12930
 
13430
 
12931
	int num_transcoders;
13431
	int num_transcoders;
12932
 
13432
 
12933
	struct intel_cursor_error_state {
13433
	struct intel_cursor_error_state {
12934
		u32 control;
13434
		u32 control;
12935
		u32 position;
13435
		u32 position;
12936
		u32 base;
13436
		u32 base;
12937
		u32 size;
13437
		u32 size;
12938
	} cursor[I915_MAX_PIPES];
13438
	} cursor[I915_MAX_PIPES];
12939
 
13439
 
12940
	struct intel_pipe_error_state {
13440
	struct intel_pipe_error_state {
12941
		bool power_domain_on;
13441
		bool power_domain_on;
12942
		u32 source;
13442
		u32 source;
12943
		u32 stat;
13443
		u32 stat;
12944
	} pipe[I915_MAX_PIPES];
13444
	} pipe[I915_MAX_PIPES];
12945
 
13445
 
12946
	struct intel_plane_error_state {
13446
	struct intel_plane_error_state {
12947
		u32 control;
13447
		u32 control;
12948
		u32 stride;
13448
		u32 stride;
12949
		u32 size;
13449
		u32 size;
12950
		u32 pos;
13450
		u32 pos;
12951
		u32 addr;
13451
		u32 addr;
12952
		u32 surface;
13452
		u32 surface;
12953
		u32 tile_offset;
13453
		u32 tile_offset;
12954
	} plane[I915_MAX_PIPES];
13454
	} plane[I915_MAX_PIPES];
12955
 
13455
 
12956
	struct intel_transcoder_error_state {
13456
	struct intel_transcoder_error_state {
12957
		bool power_domain_on;
13457
		bool power_domain_on;
12958
		enum transcoder cpu_transcoder;
13458
		enum transcoder cpu_transcoder;
12959
 
13459
 
12960
		u32 conf;
13460
		u32 conf;
12961
 
13461
 
12962
		u32 htotal;
13462
		u32 htotal;
12963
		u32 hblank;
13463
		u32 hblank;
12964
		u32 hsync;
13464
		u32 hsync;
12965
		u32 vtotal;
13465
		u32 vtotal;
12966
		u32 vblank;
13466
		u32 vblank;
12967
		u32 vsync;
13467
		u32 vsync;
12968
	} transcoder[4];
13468
	} transcoder[4];
12969
};
13469
};
12970
 
13470
 
12971
struct intel_display_error_state *
13471
struct intel_display_error_state *
12972
intel_display_capture_error_state(struct drm_device *dev)
13472
intel_display_capture_error_state(struct drm_device *dev)
12973
{
13473
{
12974
	struct drm_i915_private *dev_priv = dev->dev_private;
13474
	struct drm_i915_private *dev_priv = dev->dev_private;
12975
	struct intel_display_error_state *error;
13475
	struct intel_display_error_state *error;
12976
	int transcoders[] = {
13476
	int transcoders[] = {
12977
		TRANSCODER_A,
13477
		TRANSCODER_A,
12978
		TRANSCODER_B,
13478
		TRANSCODER_B,
12979
		TRANSCODER_C,
13479
		TRANSCODER_C,
12980
		TRANSCODER_EDP,
13480
		TRANSCODER_EDP,
12981
	};
13481
	};
12982
	int i;
13482
	int i;
12983
 
13483
 
12984
	if (INTEL_INFO(dev)->num_pipes == 0)
13484
	if (INTEL_INFO(dev)->num_pipes == 0)
12985
		return NULL;
13485
		return NULL;
12986
 
13486
 
12987
	error = kzalloc(sizeof(*error), GFP_ATOMIC);
13487
	error = kzalloc(sizeof(*error), GFP_ATOMIC);
12988
	if (error == NULL)
13488
	if (error == NULL)
12989
		return NULL;
13489
		return NULL;
12990
 
13490
 
12991
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
13491
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
12992
		error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
13492
		error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
12993
 
13493
 
12994
	for_each_pipe(i) {
13494
	for_each_pipe(dev_priv, i) {
12995
		error->pipe[i].power_domain_on =
13495
		error->pipe[i].power_domain_on =
12996
			intel_display_power_enabled_unlocked(dev_priv,
13496
			__intel_display_power_is_enabled(dev_priv,
12997
						       POWER_DOMAIN_PIPE(i));
13497
						       POWER_DOMAIN_PIPE(i));
12998
		if (!error->pipe[i].power_domain_on)
13498
		if (!error->pipe[i].power_domain_on)
12999
			continue;
13499
			continue;
13000
 
13500
 
13001
		error->cursor[i].control = I915_READ(CURCNTR(i));
13501
		error->cursor[i].control = I915_READ(CURCNTR(i));
13002
		error->cursor[i].position = I915_READ(CURPOS(i));
13502
		error->cursor[i].position = I915_READ(CURPOS(i));
13003
		error->cursor[i].base = I915_READ(CURBASE(i));
13503
		error->cursor[i].base = I915_READ(CURBASE(i));
13004
 
13504
 
13005
		error->plane[i].control = I915_READ(DSPCNTR(i));
13505
		error->plane[i].control = I915_READ(DSPCNTR(i));
13006
		error->plane[i].stride = I915_READ(DSPSTRIDE(i));
13506
		error->plane[i].stride = I915_READ(DSPSTRIDE(i));
13007
		if (INTEL_INFO(dev)->gen <= 3) {
13507
		if (INTEL_INFO(dev)->gen <= 3) {
13008
		error->plane[i].size = I915_READ(DSPSIZE(i));
13508
		error->plane[i].size = I915_READ(DSPSIZE(i));
13009
		error->plane[i].pos = I915_READ(DSPPOS(i));
13509
		error->plane[i].pos = I915_READ(DSPPOS(i));
13010
		}
13510
		}
13011
		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
13511
		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
13012
		error->plane[i].addr = I915_READ(DSPADDR(i));
13512
		error->plane[i].addr = I915_READ(DSPADDR(i));
13013
		if (INTEL_INFO(dev)->gen >= 4) {
13513
		if (INTEL_INFO(dev)->gen >= 4) {
13014
			error->plane[i].surface = I915_READ(DSPSURF(i));
13514
			error->plane[i].surface = I915_READ(DSPSURF(i));
13015
			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
13515
			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
13016
		}
13516
		}
13017
 
13517
 
13018
		error->pipe[i].source = I915_READ(PIPESRC(i));
13518
		error->pipe[i].source = I915_READ(PIPESRC(i));
13019
 
13519
 
13020
		if (HAS_GMCH_DISPLAY(dev))
13520
		if (HAS_GMCH_DISPLAY(dev))
13021
			error->pipe[i].stat = I915_READ(PIPESTAT(i));
13521
			error->pipe[i].stat = I915_READ(PIPESTAT(i));
13022
	}
13522
	}
13023
 
13523
 
13024
	error->num_transcoders = INTEL_INFO(dev)->num_pipes;
13524
	error->num_transcoders = INTEL_INFO(dev)->num_pipes;
13025
	if (HAS_DDI(dev_priv->dev))
13525
	if (HAS_DDI(dev_priv->dev))
13026
		error->num_transcoders++; /* Account for eDP. */
13526
		error->num_transcoders++; /* Account for eDP. */
13027
 
13527
 
13028
	for (i = 0; i < error->num_transcoders; i++) {
13528
	for (i = 0; i < error->num_transcoders; i++) {
13029
		enum transcoder cpu_transcoder = transcoders[i];
13529
		enum transcoder cpu_transcoder = transcoders[i];
13030
 
13530
 
13031
		error->transcoder[i].power_domain_on =
13531
		error->transcoder[i].power_domain_on =
13032
			intel_display_power_enabled_unlocked(dev_priv,
13532
			__intel_display_power_is_enabled(dev_priv,
13033
				POWER_DOMAIN_TRANSCODER(cpu_transcoder));
13533
				POWER_DOMAIN_TRANSCODER(cpu_transcoder));
13034
		if (!error->transcoder[i].power_domain_on)
13534
		if (!error->transcoder[i].power_domain_on)
13035
			continue;
13535
			continue;
13036
 
13536
 
13037
		error->transcoder[i].cpu_transcoder = cpu_transcoder;
13537
		error->transcoder[i].cpu_transcoder = cpu_transcoder;
13038
 
13538
 
13039
		error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
13539
		error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
13040
		error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
13540
		error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
13041
		error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
13541
		error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
13042
		error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
13542
		error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
13043
		error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
13543
		error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
13044
		error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
13544
		error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
13045
		error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
13545
		error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
13046
	}
13546
	}
13047
 
13547
 
13048
	return error;
13548
	return error;
13049
}
13549
}
13050
 
13550
 
13051
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
13551
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
13052
 
13552
 
13053
void
13553
void
13054
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
13554
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
13055
				struct drm_device *dev,
13555
				struct drm_device *dev,
13056
				struct intel_display_error_state *error)
13556
				struct intel_display_error_state *error)
13057
{
13557
{
-
 
13558
	struct drm_i915_private *dev_priv = dev->dev_private;
13058
	int i;
13559
	int i;
13059
 
13560
 
13060
	if (!error)
13561
	if (!error)
13061
		return;
13562
		return;
13062
 
13563
 
13063
	err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
13564
	err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
13064
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
13565
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
13065
		err_printf(m, "PWR_WELL_CTL2: %08x\n",
13566
		err_printf(m, "PWR_WELL_CTL2: %08x\n",
13066
			   error->power_well_driver);
13567
			   error->power_well_driver);
13067
	for_each_pipe(i) {
13568
	for_each_pipe(dev_priv, i) {
13068
		err_printf(m, "Pipe [%d]:\n", i);
13569
		err_printf(m, "Pipe [%d]:\n", i);
13069
		err_printf(m, "  Power: %s\n",
13570
		err_printf(m, "  Power: %s\n",
13070
			   error->pipe[i].power_domain_on ? "on" : "off");
13571
			   error->pipe[i].power_domain_on ? "on" : "off");
13071
		err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
13572
		err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
13072
		err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);
13573
		err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);
13073
 
13574
 
13074
		err_printf(m, "Plane [%d]:\n", i);
13575
		err_printf(m, "Plane [%d]:\n", i);
13075
		err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
13576
		err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
13076
		err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
13577
		err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
13077
		if (INTEL_INFO(dev)->gen <= 3) {
13578
		if (INTEL_INFO(dev)->gen <= 3) {
13078
			err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
13579
			err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
13079
			err_printf(m, "  POS: %08x\n", error->plane[i].pos);
13580
			err_printf(m, "  POS: %08x\n", error->plane[i].pos);
13080
		}
13581
		}
13081
		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
13582
		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
13082
			err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
13583
			err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
13083
		if (INTEL_INFO(dev)->gen >= 4) {
13584
		if (INTEL_INFO(dev)->gen >= 4) {
13084
			err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
13585
			err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
13085
			err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
13586
			err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
13086
		}
13587
		}
13087
 
13588
 
13088
		err_printf(m, "Cursor [%d]:\n", i);
13589
		err_printf(m, "Cursor [%d]:\n", i);
13089
		err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
13590
		err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
13090
		err_printf(m, "  POS: %08x\n", error->cursor[i].position);
13591
		err_printf(m, "  POS: %08x\n", error->cursor[i].position);
13091
		err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
13592
		err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
13092
	}
13593
	}
13093
 
13594
 
13094
	for (i = 0; i < error->num_transcoders; i++) {
13595
	for (i = 0; i < error->num_transcoders; i++) {
13095
		err_printf(m, "CPU transcoder: %c\n",
13596
		err_printf(m, "CPU transcoder: %c\n",
13096
			   transcoder_name(error->transcoder[i].cpu_transcoder));
13597
			   transcoder_name(error->transcoder[i].cpu_transcoder));
13097
		err_printf(m, "  Power: %s\n",
13598
		err_printf(m, "  Power: %s\n",
13098
			   error->transcoder[i].power_domain_on ? "on" : "off");
13599
			   error->transcoder[i].power_domain_on ? "on" : "off");
13099
		err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
13600
		err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
13100
		err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
13601
		err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
13101
		err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
13602
		err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
13102
		err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
13603
		err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
13103
		err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
13604
		err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
13104
		err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
13605
		err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
13105
		err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
13606
		err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
13106
	}
13607
	}
13107
}
13608
}
13108
#endif
13609
#endif
-
 
13610
 
-
 
13611
void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file)
-
 
13612
{
-
 
13613
	struct intel_crtc *crtc;
-
 
13614
 
-
 
13615
	for_each_intel_crtc(dev, crtc) {
-
 
13616
		struct intel_unpin_work *work;
-
 
13617
 
-
 
13618
		spin_lock_irq(&dev->event_lock);
-
 
13619
 
-
 
13620
		work = crtc->unpin_work;
-
 
13621
 
-
 
13622
		if (work && work->event &&
-
 
13623
		    work->event->base.file_priv == file) {
-
 
13624
			kfree(work->event);
-
 
13625
			work->event = NULL;
-
 
13626
		}
-
 
13627
 
-
 
13628
		spin_unlock_irq(&dev->event_lock);
-
 
13629
	}
-
 
13630
}