Subversion Repositories Kolibri OS

Rev

Rev 6084 | Go to most recent revision | Only display areas with differences | Regard whitespace | Details | Blame | Last modification | View Log | RSS feed

Rev 6084 Rev 6937
1
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
23
 
23
 
24
/**
 * DOC: atomic modeset support
 *
 * The functions here implement the state management and hardware programming
 * dispatch required by the atomic modeset infrastructure.
 * See intel_atomic_plane.c for the plane-specific atomic functionality.
 */
31
 
31
 
32
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane_helper.h>
#include "intel_drv.h"
37
 
37
 
38
/**
38
/**
39
 * intel_connector_atomic_get_property - fetch connector property value
39
 * intel_connector_atomic_get_property - fetch connector property value
40
 * @connector: connector to fetch property for
40
 * @connector: connector to fetch property for
41
 * @state: state containing the property value
41
 * @state: state containing the property value
42
 * @property: property to look up
42
 * @property: property to look up
43
 * @val: pointer to write property value into
43
 * @val: pointer to write property value into
44
 *
44
 *
45
 * The DRM core does not store shadow copies of properties for
45
 * The DRM core does not store shadow copies of properties for
46
 * atomic-capable drivers.  This entrypoint is used to fetch
46
 * atomic-capable drivers.  This entrypoint is used to fetch
47
 * the current value of a driver-specific connector property.
47
 * the current value of a driver-specific connector property.
48
 */
48
 */
49
int
49
int
50
intel_connector_atomic_get_property(struct drm_connector *connector,
50
intel_connector_atomic_get_property(struct drm_connector *connector,
51
				    const struct drm_connector_state *state,
51
				    const struct drm_connector_state *state,
52
				    struct drm_property *property,
52
				    struct drm_property *property,
53
				    uint64_t *val)
53
				    uint64_t *val)
54
{
54
{
55
	int i;
55
	int i;
56
 
56
 
57
	/*
57
	/*
58
	 * TODO: We only have atomic modeset for planes at the moment, so the
58
	 * TODO: We only have atomic modeset for planes at the moment, so the
59
	 * crtc/connector code isn't quite ready yet.  Until it's ready,
59
	 * crtc/connector code isn't quite ready yet.  Until it's ready,
60
	 * continue to look up all property values in the DRM's shadow copy
60
	 * continue to look up all property values in the DRM's shadow copy
61
	 * in obj->properties->values[].
61
	 * in obj->properties->values[].
62
	 *
62
	 *
63
	 * When the crtc/connector state work matures, this function should
63
	 * When the crtc/connector state work matures, this function should
64
	 * be updated to read the values out of the state structure instead.
64
	 * be updated to read the values out of the state structure instead.
65
	 */
65
	 */
66
	for (i = 0; i < connector->base.properties->count; i++) {
66
	for (i = 0; i < connector->base.properties->count; i++) {
67
		if (connector->base.properties->properties[i] == property) {
67
		if (connector->base.properties->properties[i] == property) {
68
			*val = connector->base.properties->values[i];
68
			*val = connector->base.properties->values[i];
69
			return 0;
69
			return 0;
70
		}
70
		}
71
	}
71
	}
72
 
72
 
73
	return -EINVAL;
73
	return -EINVAL;
74
}
74
}
75
 
75
 
76
/*
76
/*
77
 * intel_crtc_duplicate_state - duplicate crtc state
77
 * intel_crtc_duplicate_state - duplicate crtc state
78
 * @crtc: drm crtc
78
 * @crtc: drm crtc
79
 *
79
 *
80
 * Allocates and returns a copy of the crtc state (both common and
80
 * Allocates and returns a copy of the crtc state (both common and
81
 * Intel-specific) for the specified crtc.
81
 * Intel-specific) for the specified crtc.
82
 *
82
 *
83
 * Returns: The newly allocated crtc state, or NULL on failure.
83
 * Returns: The newly allocated crtc state, or NULL on failure.
84
 */
84
 */
85
struct drm_crtc_state *
85
struct drm_crtc_state *
86
intel_crtc_duplicate_state(struct drm_crtc *crtc)
86
intel_crtc_duplicate_state(struct drm_crtc *crtc)
87
{
87
{
88
	struct intel_crtc_state *crtc_state;
88
	struct intel_crtc_state *crtc_state;
89
 
89
 
90
	crtc_state = kmemdup(crtc->state, sizeof(*crtc_state), GFP_KERNEL);
90
	crtc_state = kmemdup(crtc->state, sizeof(*crtc_state), GFP_KERNEL);
91
	if (!crtc_state)
91
	if (!crtc_state)
92
		return NULL;
92
		return NULL;
93
 
93
 
94
	__drm_atomic_helper_crtc_duplicate_state(crtc, &crtc_state->base);
94
	__drm_atomic_helper_crtc_duplicate_state(crtc, &crtc_state->base);
95
 
95
 
96
	crtc_state->update_pipe = false;
96
	crtc_state->update_pipe = false;
-
 
97
	crtc_state->disable_lp_wm = false;
-
 
98
	crtc_state->disable_cxsr = false;
-
 
99
	crtc_state->update_wm_pre = false;
-
 
100
	crtc_state->update_wm_post = false;
97
 
101
 
98
	return &crtc_state->base;
102
	return &crtc_state->base;
99
}
103
}
100
 
104
 
101
/**
 * intel_crtc_destroy_state - destroy crtc state
 * @crtc: drm crtc
 * @state: crtc state to destroy
 *
 * Destroys the crtc state (both common and Intel-specific) for the
 * specified crtc.
 */
void
intel_crtc_destroy_state(struct drm_crtc *crtc,
			  struct drm_crtc_state *state)
{
	/* No extra Intel-owned resources to release beyond the state itself. */
	drm_atomic_helper_crtc_destroy_state(crtc, state);
}
114
 
118
 
115
/**
119
/**
116
 * intel_atomic_setup_scalers() - setup scalers for crtc per staged requests
120
 * intel_atomic_setup_scalers() - setup scalers for crtc per staged requests
117
 * @dev: DRM device
121
 * @dev: DRM device
118
 * @crtc: intel crtc
122
 * @crtc: intel crtc
119
 * @crtc_state: incoming crtc_state to validate and setup scalers
123
 * @crtc_state: incoming crtc_state to validate and setup scalers
120
 *
124
 *
121
 * This function sets up scalers based on staged scaling requests for
125
 * This function sets up scalers based on staged scaling requests for
122
 * a @crtc and its planes. It is called from crtc level check path. If request
126
 * a @crtc and its planes. It is called from crtc level check path. If request
123
 * is a supportable request, it attaches scalers to requested planes and crtc.
127
 * is a supportable request, it attaches scalers to requested planes and crtc.
124
 *
128
 *
125
 * This function takes into account the current scaler(s) in use by any planes
129
 * This function takes into account the current scaler(s) in use by any planes
126
 * not being part of this atomic state
130
 * not being part of this atomic state
127
 *
131
 *
128
 *  Returns:
132
 *  Returns:
129
 *         0 - scalers were setup succesfully
133
 *         0 - scalers were setup succesfully
130
 *         error code - otherwise
134
 *         error code - otherwise
131
 */
135
 */
132
int intel_atomic_setup_scalers(struct drm_device *dev,
136
int intel_atomic_setup_scalers(struct drm_device *dev,
133
	struct intel_crtc *intel_crtc,
137
	struct intel_crtc *intel_crtc,
134
	struct intel_crtc_state *crtc_state)
138
	struct intel_crtc_state *crtc_state)
135
{
139
{
136
	struct drm_plane *plane = NULL;
140
	struct drm_plane *plane = NULL;
137
	struct intel_plane *intel_plane;
141
	struct intel_plane *intel_plane;
138
	struct intel_plane_state *plane_state = NULL;
142
	struct intel_plane_state *plane_state = NULL;
139
	struct intel_crtc_scaler_state *scaler_state =
143
	struct intel_crtc_scaler_state *scaler_state =
140
		&crtc_state->scaler_state;
144
		&crtc_state->scaler_state;
141
	struct drm_atomic_state *drm_state = crtc_state->base.state;
145
	struct drm_atomic_state *drm_state = crtc_state->base.state;
142
	int num_scalers_need;
146
	int num_scalers_need;
143
	int i, j;
147
	int i, j;
144
 
148
 
145
	num_scalers_need = hweight32(scaler_state->scaler_users);
149
	num_scalers_need = hweight32(scaler_state->scaler_users);
146
 
150
 
147
	/*
151
	/*
148
	 * High level flow:
152
	 * High level flow:
149
	 * - staged scaler requests are already in scaler_state->scaler_users
153
	 * - staged scaler requests are already in scaler_state->scaler_users
150
	 * - check whether staged scaling requests can be supported
154
	 * - check whether staged scaling requests can be supported
151
	 * - add planes using scalers that aren't in current transaction
155
	 * - add planes using scalers that aren't in current transaction
152
	 * - assign scalers to requested users
156
	 * - assign scalers to requested users
153
	 * - as part of plane commit, scalers will be committed
157
	 * - as part of plane commit, scalers will be committed
154
	 *   (i.e., either attached or detached) to respective planes in hw
158
	 *   (i.e., either attached or detached) to respective planes in hw
155
	 * - as part of crtc_commit, scaler will be either attached or detached
159
	 * - as part of crtc_commit, scaler will be either attached or detached
156
	 *   to crtc in hw
160
	 *   to crtc in hw
157
	 */
161
	 */
158
 
162
 
159
	/* fail if required scalers > available scalers */
163
	/* fail if required scalers > available scalers */
160
	if (num_scalers_need > intel_crtc->num_scalers){
164
	if (num_scalers_need > intel_crtc->num_scalers){
161
		DRM_DEBUG_KMS("Too many scaling requests %d > %d\n",
165
		DRM_DEBUG_KMS("Too many scaling requests %d > %d\n",
162
			num_scalers_need, intel_crtc->num_scalers);
166
			num_scalers_need, intel_crtc->num_scalers);
163
		return -EINVAL;
167
		return -EINVAL;
164
	}
168
	}
165
 
169
 
166
	/* walkthrough scaler_users bits and start assigning scalers */
170
	/* walkthrough scaler_users bits and start assigning scalers */
167
	for (i = 0; i < sizeof(scaler_state->scaler_users) * 8; i++) {
171
	for (i = 0; i < sizeof(scaler_state->scaler_users) * 8; i++) {
168
		int *scaler_id;
172
		int *scaler_id;
169
		const char *name;
173
		const char *name;
170
		int idx;
174
		int idx;
171
 
175
 
172
		/* skip if scaler not required */
176
		/* skip if scaler not required */
173
		if (!(scaler_state->scaler_users & (1 << i)))
177
		if (!(scaler_state->scaler_users & (1 << i)))
174
			continue;
178
			continue;
175
 
179
 
176
		if (i == SKL_CRTC_INDEX) {
180
		if (i == SKL_CRTC_INDEX) {
177
			name = "CRTC";
181
			name = "CRTC";
178
			idx = intel_crtc->base.base.id;
182
			idx = intel_crtc->base.base.id;
179
 
183
 
180
			/* panel fitter case: assign as a crtc scaler */
184
			/* panel fitter case: assign as a crtc scaler */
181
			scaler_id = &scaler_state->scaler_id;
185
			scaler_id = &scaler_state->scaler_id;
182
		} else {
186
		} else {
183
			name = "PLANE";
187
			name = "PLANE";
184
 
188
 
185
			/* plane scaler case: assign as a plane scaler */
189
			/* plane scaler case: assign as a plane scaler */
186
			/* find the plane that set the bit as scaler_user */
190
			/* find the plane that set the bit as scaler_user */
187
			plane = drm_state->planes[i];
191
			plane = drm_state->planes[i];
188
 
192
 
189
			/*
193
			/*
190
			 * to enable/disable hq mode, add planes that are using scaler
194
			 * to enable/disable hq mode, add planes that are using scaler
191
			 * into this transaction
195
			 * into this transaction
192
			 */
196
			 */
193
			if (!plane) {
197
			if (!plane) {
194
				struct drm_plane_state *state;
198
				struct drm_plane_state *state;
195
				plane = drm_plane_from_index(dev, i);
199
				plane = drm_plane_from_index(dev, i);
196
				state = drm_atomic_get_plane_state(drm_state, plane);
200
				state = drm_atomic_get_plane_state(drm_state, plane);
197
				if (IS_ERR(state)) {
201
				if (IS_ERR(state)) {
198
					DRM_DEBUG_KMS("Failed to add [PLANE:%d] to drm_state\n",
202
					DRM_DEBUG_KMS("Failed to add [PLANE:%d] to drm_state\n",
199
						plane->base.id);
203
						plane->base.id);
200
					return PTR_ERR(state);
204
					return PTR_ERR(state);
201
				}
205
				}
202
 
206
 
203
				/*
207
				/*
204
				 * the plane is added after plane checks are run,
208
				 * the plane is added after plane checks are run,
205
				 * but since this plane is unchanged just do the
209
				 * but since this plane is unchanged just do the
206
				 * minimum required validation.
210
				 * minimum required validation.
207
				 */
211
				 */
208
				if (plane->type == DRM_PLANE_TYPE_PRIMARY)
-
 
209
					intel_crtc->atomic.wait_for_flips = true;
-
 
210
				crtc_state->base.planes_changed = true;
212
				crtc_state->base.planes_changed = true;
211
			}
213
			}
212
 
214
 
213
			intel_plane = to_intel_plane(plane);
215
			intel_plane = to_intel_plane(plane);
214
			idx = plane->base.id;
216
			idx = plane->base.id;
215
 
217
 
216
			/* plane on different crtc cannot be a scaler user of this crtc */
218
			/* plane on different crtc cannot be a scaler user of this crtc */
217
			if (WARN_ON(intel_plane->pipe != intel_crtc->pipe)) {
219
			if (WARN_ON(intel_plane->pipe != intel_crtc->pipe)) {
218
				continue;
220
				continue;
219
			}
221
			}
220
 
222
 
221
			plane_state = to_intel_plane_state(drm_state->plane_states[i]);
223
			plane_state = to_intel_plane_state(drm_state->plane_states[i]);
222
			scaler_id = &plane_state->scaler_id;
224
			scaler_id = &plane_state->scaler_id;
223
		}
225
		}
224
 
226
 
225
		if (*scaler_id < 0) {
227
		if (*scaler_id < 0) {
226
			/* find a free scaler */
228
			/* find a free scaler */
227
			for (j = 0; j < intel_crtc->num_scalers; j++) {
229
			for (j = 0; j < intel_crtc->num_scalers; j++) {
228
				if (!scaler_state->scalers[j].in_use) {
230
				if (!scaler_state->scalers[j].in_use) {
229
					scaler_state->scalers[j].in_use = 1;
231
					scaler_state->scalers[j].in_use = 1;
230
					*scaler_id = j;
232
					*scaler_id = j;
231
					DRM_DEBUG_KMS("Attached scaler id %u.%u to %s:%d\n",
233
					DRM_DEBUG_KMS("Attached scaler id %u.%u to %s:%d\n",
232
						intel_crtc->pipe, *scaler_id, name, idx);
234
						intel_crtc->pipe, *scaler_id, name, idx);
233
					break;
235
					break;
234
				}
236
				}
235
			}
237
			}
236
		}
238
		}
237
 
239
 
238
		if (WARN_ON(*scaler_id < 0)) {
240
		if (WARN_ON(*scaler_id < 0)) {
239
			DRM_DEBUG_KMS("Cannot find scaler for %s:%d\n", name, idx);
241
			DRM_DEBUG_KMS("Cannot find scaler for %s:%d\n", name, idx);
240
			continue;
242
			continue;
241
		}
243
		}
242
 
244
 
243
		/* set scaler mode */
245
		/* set scaler mode */
244
		if (num_scalers_need == 1 && intel_crtc->pipe != PIPE_C) {
246
		if (num_scalers_need == 1 && intel_crtc->pipe != PIPE_C) {
245
			/*
247
			/*
246
			 * when only 1 scaler is in use on either pipe A or B,
248
			 * when only 1 scaler is in use on either pipe A or B,
247
			 * scaler 0 operates in high quality (HQ) mode.
249
			 * scaler 0 operates in high quality (HQ) mode.
248
			 * In this case use scaler 0 to take advantage of HQ mode
250
			 * In this case use scaler 0 to take advantage of HQ mode
249
			 */
251
			 */
250
			*scaler_id = 0;
252
			*scaler_id = 0;
251
			scaler_state->scalers[0].in_use = 1;
253
			scaler_state->scalers[0].in_use = 1;
252
			scaler_state->scalers[0].mode = PS_SCALER_MODE_HQ;
254
			scaler_state->scalers[0].mode = PS_SCALER_MODE_HQ;
253
			scaler_state->scalers[1].in_use = 0;
255
			scaler_state->scalers[1].in_use = 0;
254
		} else {
256
		} else {
255
			scaler_state->scalers[*scaler_id].mode = PS_SCALER_MODE_DYN;
257
			scaler_state->scalers[*scaler_id].mode = PS_SCALER_MODE_DYN;
256
		}
258
		}
257
	}
259
	}
258
 
260
 
259
	return 0;
261
	return 0;
260
}
262
}
261
 
263
 
262
static void
264
static void
263
intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
265
intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
264
				  struct intel_shared_dpll_config *shared_dpll)
266
				  struct intel_shared_dpll_config *shared_dpll)
265
{
267
{
266
	enum intel_dpll_id i;
268
	enum intel_dpll_id i;
267
 
269
 
268
	/* Copy shared dpll state */
270
	/* Copy shared dpll state */
269
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
271
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
270
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
272
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
271
 
273
 
272
		shared_dpll[i] = pll->config;
274
		shared_dpll[i] = pll->config;
273
	}
275
	}
274
}
276
}
275
 
277
 
276
struct intel_shared_dpll_config *
278
struct intel_shared_dpll_config *
277
intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
279
intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
278
{
280
{
279
	struct intel_atomic_state *state = to_intel_atomic_state(s);
281
	struct intel_atomic_state *state = to_intel_atomic_state(s);
280
 
282
 
281
	WARN_ON(!drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
283
	WARN_ON(!drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
282
 
284
 
283
	if (!state->dpll_set) {
285
	if (!state->dpll_set) {
284
		state->dpll_set = true;
286
		state->dpll_set = true;
285
 
287
 
286
		intel_atomic_duplicate_dpll_state(to_i915(s->dev),
288
		intel_atomic_duplicate_dpll_state(to_i915(s->dev),
287
						  state->shared_dpll);
289
						  state->shared_dpll);
288
	}
290
	}
289
 
291
 
290
	return state->shared_dpll;
292
	return state->shared_dpll;
291
}
293
}
292
 
294
 
293
struct drm_atomic_state *
295
struct drm_atomic_state *
294
intel_atomic_state_alloc(struct drm_device *dev)
296
intel_atomic_state_alloc(struct drm_device *dev)
295
{
297
{
296
	struct intel_atomic_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
298
	struct intel_atomic_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
297
 
299
 
298
	if (!state || drm_atomic_state_init(dev, &state->base) < 0) {
300
	if (!state || drm_atomic_state_init(dev, &state->base) < 0) {
299
		kfree(state);
301
		kfree(state);
300
		return NULL;
302
		return NULL;
301
	}
303
	}
302
 
304
 
303
	return &state->base;
305
	return &state->base;
304
}
306
}
305
 
307
 
306
void intel_atomic_state_clear(struct drm_atomic_state *s)
308
void intel_atomic_state_clear(struct drm_atomic_state *s)
307
{
309
{
308
	struct intel_atomic_state *state = to_intel_atomic_state(s);
310
	struct intel_atomic_state *state = to_intel_atomic_state(s);
309
	drm_atomic_state_default_clear(&state->base);
311
	drm_atomic_state_default_clear(&state->base);
310
	state->dpll_set = false;
312
	state->dpll_set = false;
311
}
313
}