Subversion Repositories — Kolibri OS

Diff between Rev 6935 and Rev 6937 (lines removed in 6937 are prefixed with -, lines added with +)
/*
 * Copyright © 2014 Red Hat
 *
 * Permission to use, copy, modify, distribute, and sell this software and its
 * documentation for any purpose is hereby granted without fee, provided that
 * the above copyright notice appear in all copies and that both that copyright
 * notice and this permission notice appear in supporting documentation, and
 * that the name of the copyright holders not be used in advertising or
 * publicity pertaining to distribution of the software without specific,
 * written prior permission.  The copyright holders make no representations
 * about the suitability of this software for any purpose.  It is provided "as
 * is" without express or implied warranty.
 *
 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
 * OF THIS SOFTWARE.
 */

#include 
-#include 
-#include 
-#include 
-#include 
#include 
#include 
#include 
#include 
#include 
#include 
#include 
#include 

#include 

u64 get_jiffies_64(void)
{
    return jiffies;
}
/**
 * DOC: dp mst helper
 *
 * These functions contain parts of the DisplayPort 1.2a MultiStream Transport
 * protocol. The helpers contain a topology manager and bandwidth manager.
 * The helpers encapsulate the sending and receiving of sideband msgs.
 */
static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
				  char *buf);
static int test_calc_pbn_mode(void);

static void drm_dp_put_port(struct drm_dp_mst_port *port);

static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
				     int id,
				     struct drm_dp_payload *payload);

static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
				  struct drm_dp_mst_port *port,
				  int offset, int size, u8 *bytes);

static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
				     struct drm_dp_mst_branch *mstb);
static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
					   struct drm_dp_mst_branch *mstb,
					   struct drm_dp_mst_port *port);
static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
				 u8 *guid);

static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux);
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux);
static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);
/* sideband msg handling */
static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
{
	u8 bitmask = 0x80;
	u8 bitshift = 7;
	u8 array_index = 0;
	int number_of_bits = num_nibbles * 4;
	u8 remainder = 0;

	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		remainder |= (data[array_index] & bitmask) >> bitshift;
		bitmask >>= 1;
		bitshift--;
		if (bitmask == 0) {
			bitmask = 0x80;
			bitshift = 7;
			array_index++;
		}
		if ((remainder & 0x10) == 0x10)
			remainder ^= 0x13;
	}

	number_of_bits = 4;
	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		if ((remainder & 0x10) != 0)
			remainder ^= 0x13;
	}

	return remainder;
}
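/*
 * drm_dp_msg_header_crc4() above is a plain bitwise CRC-4: polynomial
 * x^4 + x + 1 (the 0x13 constant is that polynomial with its implicit x^4
 * term), fed MSB-first over num_nibbles * 4 bits, then flushed with four
 * zero bits. drm_dp_encode_sideband_msg_hdr() below passes (idx * 2) - 1
 * nibbles, i.e. every nibble of the header except the last one, which is
 * where the CRC itself lands.
 *
 * Illustrative sketch only, not part of this file: a user-space harness for
 * eyeballing sideband traces, assuming a stand-alone copy of the function
 * built with <stdint.h> types:
 *
 *	#include <stdio.h>
 *	#include <stdint.h>
 *
 *	int main(void)
 *	{
 *		uint8_t hdr[3] = { 0x10, 0x06, 0x90 };	// lct=1 header, CRC nibble clear
 *		uint8_t crc = drm_dp_msg_header_crc4(hdr, 5);	// first 5 nibbles
 *
 *		hdr[2] |= crc & 0xf;	// low nibble of last byte carries the CRC
 *		printf("crc4 = 0x%x\n", crc);
 *		return 0;
 *	}
 */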

static u8 drm_dp_msg_data_crc4(const uint8_t *data, u8 number_of_bytes)
{
	u8 bitmask = 0x80;
	u8 bitshift = 7;
	u8 array_index = 0;
	int number_of_bits = number_of_bytes * 8;
	u16 remainder = 0;

	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		remainder |= (data[array_index] & bitmask) >> bitshift;
		bitmask >>= 1;
		bitshift--;
		if (bitmask == 0) {
			bitmask = 0x80;
			bitshift = 7;
			array_index++;
		}
		if ((remainder & 0x100) == 0x100)
			remainder ^= 0xd5;
	}

	number_of_bits = 8;
	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		if ((remainder & 0x100) != 0)
			remainder ^= 0xd5;
	}

	return remainder & 0xff;
}
static inline u8 drm_dp_calc_sb_hdr_size(struct drm_dp_sideband_msg_hdr *hdr)
{
	u8 size = 3;
	size += (hdr->lct / 2);
	return size;
}
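/*
 * Why 3 + lct / 2: a sideband header is one byte of lct/lcr, lct / 2 bytes
 * of RAD (one 4-bit port number per hop past the first, packed two to a
 * byte), one byte of broadcast/path_msg/msg_len, and one byte of
 * somt/eomt/seqno plus the CRC nibble. So lct = 1 gives the minimum 3-byte
 * header, lct = 2 or 3 give 4 bytes, up to 10 bytes at the maximum lct of 15.
 */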

static void drm_dp_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
					   u8 *buf, int *len)
{
	int idx = 0;
	int i;
	u8 crc4;
	buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf);
	for (i = 0; i < (hdr->lct / 2); i++)
		buf[idx++] = hdr->rad[i];
	buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) |
		(hdr->msg_len & 0x3f);
	buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4);

	crc4 = drm_dp_msg_header_crc4(buf, (idx * 2) - 1);
	buf[idx - 1] |= (crc4 & 0xf);

	*len = idx;
}

static bool drm_dp_decode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
					   u8 *buf, int buflen, u8 *hdrlen)
{
	u8 crc4;
	u8 len;
	int i;
	u8 idx;
	if (buf[0] == 0)
		return false;
	len = 3;
	len += ((buf[0] & 0xf0) >> 4) / 2;
	if (len > buflen)
		return false;
	crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1);

	if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) {
		DRM_DEBUG_KMS("crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]);
		return false;
	}

	hdr->lct = (buf[0] & 0xf0) >> 4;
	hdr->lcr = (buf[0] & 0xf);
	idx = 1;
	for (i = 0; i < (hdr->lct / 2); i++)
		hdr->rad[i] = buf[idx++];
	hdr->broadcast = (buf[idx] >> 7) & 0x1;
	hdr->path_msg = (buf[idx] >> 6) & 0x1;
	hdr->msg_len = buf[idx] & 0x3f;
	idx++;
	hdr->somt = (buf[idx] >> 7) & 0x1;
	hdr->eomt = (buf[idx] >> 6) & 0x1;
	hdr->seqno = (buf[idx] >> 4) & 0x1;
	idx++;
	*hdrlen = idx;
	return true;
}
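/*
 * Taken together the two header helpers round-trip: encode packs the struct,
 * folds the CRC nibble into the last byte and reports the length; decode
 * length-checks the buffer, verifies that nibble and unpacks the same
 * fields. A minimal sketch of the round trip (kernel context assumed, the
 * struct comes from drm_dp_mst_helper.h):
 *
 *	struct drm_dp_sideband_msg_hdr in = {
 *		.lct = 1, .lcr = 0,	// directly attached branch, no RAD
 *		.msg_len = 10,
 *		.somt = 1, .eomt = 1,	// message fits in one chunk
 *	};
 *	struct drm_dp_sideband_msg_hdr out;
 *	u8 buf[16], hdrlen;
 *	int len;
 *
 *	drm_dp_encode_sideband_msg_hdr(&in, buf, &len);
 *	if (!drm_dp_decode_sideband_msg_hdr(&out, buf, len, &hdrlen))
 *		DRM_DEBUG_KMS("header did not survive the round trip\n");
 */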

static void drm_dp_encode_sideband_req(struct drm_dp_sideband_msg_req_body *req,
				       struct drm_dp_sideband_msg_tx *raw)
{
	int idx = 0;
	int i;
	u8 *buf = raw->msg;
	buf[idx++] = req->req_type & 0x7f;

	switch (req->req_type) {
	case DP_ENUM_PATH_RESOURCES:
		buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
		idx++;
		break;
	case DP_ALLOCATE_PAYLOAD:
		buf[idx] = (req->u.allocate_payload.port_number & 0xf) << 4 |
			(req->u.allocate_payload.number_sdp_streams & 0xf);
		idx++;
		buf[idx] = (req->u.allocate_payload.vcpi & 0x7f);
		idx++;
		buf[idx] = (req->u.allocate_payload.pbn >> 8);
		idx++;
		buf[idx] = (req->u.allocate_payload.pbn & 0xff);
		idx++;
		for (i = 0; i < req->u.allocate_payload.number_sdp_streams / 2; i++) {
			buf[idx] = ((req->u.allocate_payload.sdp_stream_sink[i * 2] & 0xf) << 4) |
				(req->u.allocate_payload.sdp_stream_sink[i * 2 + 1] & 0xf);
			idx++;
		}
		if (req->u.allocate_payload.number_sdp_streams & 1) {
			i = req->u.allocate_payload.number_sdp_streams - 1;
			buf[idx] = (req->u.allocate_payload.sdp_stream_sink[i] & 0xf) << 4;
			idx++;
		}
		break;
	case DP_QUERY_PAYLOAD:
		buf[idx] = (req->u.query_payload.port_number & 0xf) << 4;
		idx++;
		buf[idx] = (req->u.query_payload.vcpi & 0x7f);
		idx++;
		break;
	case DP_REMOTE_DPCD_READ:
		buf[idx] = (req->u.dpcd_read.port_number & 0xf) << 4;
		buf[idx] |= ((req->u.dpcd_read.dpcd_address & 0xf0000) >> 16) & 0xf;
		idx++;
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff00) >> 8;
		idx++;
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff);
		idx++;
		buf[idx] = (req->u.dpcd_read.num_bytes);
		idx++;
		break;

	case DP_REMOTE_DPCD_WRITE:
		buf[idx] = (req->u.dpcd_write.port_number & 0xf) << 4;
		buf[idx] |= ((req->u.dpcd_write.dpcd_address & 0xf0000) >> 16) & 0xf;
		idx++;
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff00) >> 8;
		idx++;
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff);
		idx++;
		buf[idx] = (req->u.dpcd_write.num_bytes);
		idx++;
		memcpy(&buf[idx], req->u.dpcd_write.bytes, req->u.dpcd_write.num_bytes);
		idx += req->u.dpcd_write.num_bytes;
		break;
	case DP_REMOTE_I2C_READ:
		buf[idx] = (req->u.i2c_read.port_number & 0xf) << 4;
		buf[idx] |= (req->u.i2c_read.num_transactions & 0x3);
		idx++;
		for (i = 0; i < (req->u.i2c_read.num_transactions & 0x3); i++) {
			buf[idx] = req->u.i2c_read.transactions[i].i2c_dev_id & 0x7f;
			idx++;
			buf[idx] = req->u.i2c_read.transactions[i].num_bytes;
			idx++;
			memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);
			idx += req->u.i2c_read.transactions[i].num_bytes;

			buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 5;
			buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);
			idx++;
		}
		buf[idx] = (req->u.i2c_read.read_i2c_device_id) & 0x7f;
		idx++;
		buf[idx] = (req->u.i2c_read.num_bytes_read);
		idx++;
		break;

	case DP_REMOTE_I2C_WRITE:
		buf[idx] = (req->u.i2c_write.port_number & 0xf) << 4;
		idx++;
		buf[idx] = (req->u.i2c_write.write_i2c_device_id) & 0x7f;
		idx++;
		buf[idx] = (req->u.i2c_write.num_bytes);
		idx++;
		memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
		idx += req->u.i2c_write.num_bytes;
		break;
	}
	raw->cur_len = idx;
}
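/*
 * drm_dp_encode_sideband_req() is a straight serializer: one request-type
 * byte, then a per-opcode body packed the way DP 1.2 lays the fields out.
 * As a sketch (kernel context assumed), a remote DPCD read of 16 bytes at
 * DPCD address 0x60 behind port 1 would be built like this:
 *
 *	struct drm_dp_sideband_msg_req_body req;
 *	struct drm_dp_sideband_msg_tx txmsg;
 *
 *	memset(&req, 0, sizeof(req));
 *	req.req_type = DP_REMOTE_DPCD_READ;
 *	req.u.dpcd_read.port_number = 1;
 *	req.u.dpcd_read.dpcd_address = 0x60;
 *	req.u.dpcd_read.num_bytes = 16;
 *	drm_dp_encode_sideband_req(&req, &txmsg);
 *
 * which leaves txmsg.cur_len == 5: the opcode, the port number plus address
 * bits 19:16, address bits 15:8, address bits 7:0, and the byte count.
 */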

static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len)
{
	u8 crc4;
	crc4 = drm_dp_msg_data_crc4(msg, len);
	msg[len] = crc4;
}

static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *rep,
					 struct drm_dp_sideband_msg_tx *raw)
{
	int idx = 0;
	u8 *buf = raw->msg;

	buf[idx++] = (rep->reply_type & 0x1) << 7 | (rep->req_type & 0x7f);

	raw->cur_len = idx;
}

/* this adds a chunk of msg to the builder to get the final msg */
static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
				      u8 *replybuf, u8 replybuflen, bool hdr)
{
	int ret;
	u8 crc4;

	if (hdr) {
		u8 hdrlen;
		struct drm_dp_sideband_msg_hdr recv_hdr;
		ret = drm_dp_decode_sideband_msg_hdr(&recv_hdr, replybuf, replybuflen, &hdrlen);
		if (ret == false) {
			print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16, 1, replybuf, replybuflen, false);
			return false;
		}

		/* get length contained in this portion */
		msg->curchunk_len = recv_hdr.msg_len;
		msg->curchunk_hdrlen = hdrlen;

		/* we have already gotten an somt - don't bother parsing */
		if (recv_hdr.somt && msg->have_somt)
			return false;

		if (recv_hdr.somt) {
			memcpy(&msg->initial_hdr, &recv_hdr, sizeof(struct drm_dp_sideband_msg_hdr));
			msg->have_somt = true;
		}
		if (recv_hdr.eomt)
			msg->have_eomt = true;

		/* copy the bytes for the remainder of this header chunk */
		msg->curchunk_idx = min(msg->curchunk_len, (u8)(replybuflen - hdrlen));
		memcpy(&msg->chunk[0], replybuf + hdrlen, msg->curchunk_idx);
	} else {
		memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
		msg->curchunk_idx += replybuflen;
	}

	if (msg->curchunk_idx >= msg->curchunk_len) {
		/* do CRC */
		crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);
		/* copy chunk into bigger msg */
		memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1);
		msg->curlen += msg->curchunk_len - 1;
	}
	return true;
}
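/*
 * Callers drive drm_dp_sideband_msg_build() as a small state machine: the
 * first 16-byte DPCD window of a reply carries a header (hdr = true),
 * follow-up windows are body-only (hdr = false), and the message is whole
 * once have_eomt is set. A rough sketch of that loop, condensed from how
 * the receive path elsewhere in this helper drives it (error handling
 * dropped; basereg would be e.g. DP_SIDEBAND_MSG_DOWN_REP_BASE):
 *
 *	u8 replyblock[32];
 *	int replylen, curreply, len;
 *
 *	drm_dp_dpcd_read(mgr->aux, basereg, replyblock, 16);
 *	drm_dp_sideband_msg_build(msg, replyblock, 16, true);	// header chunk
 *
 *	replylen = msg->curchunk_len + msg->curchunk_hdrlen - 16;
 *	curreply = 16;
 *	while (replylen > 0) {
 *		len = min(replylen, 16);
 *		drm_dp_dpcd_read(mgr->aux, basereg + curreply,
 *				 replyblock, len);
 *		drm_dp_sideband_msg_build(msg, replyblock, len, false);
 *		curreply += len;
 *		replylen -= len;
 *	}
 */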

static bool drm_dp_sideband_parse_link_address(struct drm_dp_sideband_msg_rx *raw,
					       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	int i;
	memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16);
	idx += 16;
	repmsg->u.link_addr.nports = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	for (i = 0; i < repmsg->u.link_addr.nports; i++) {
		if (raw->msg[idx] & 0x80)
			repmsg->u.link_addr.ports[i].input_port = 1;

		repmsg->u.link_addr.ports[i].peer_device_type = (raw->msg[idx] >> 4) & 0x7;
		repmsg->u.link_addr.ports[i].port_number = (raw->msg[idx] & 0xf);

		idx++;
		if (idx > raw->curlen)
			goto fail_len;
		repmsg->u.link_addr.ports[i].mcs = (raw->msg[idx] >> 7) & 0x1;
		repmsg->u.link_addr.ports[i].ddps = (raw->msg[idx] >> 6) & 0x1;
		if (repmsg->u.link_addr.ports[i].input_port == 0)
			repmsg->u.link_addr.ports[i].legacy_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
		idx++;
		if (idx > raw->curlen)
			goto fail_len;
		if (repmsg->u.link_addr.ports[i].input_port == 0) {
			repmsg->u.link_addr.ports[i].dpcd_revision = (raw->msg[idx]);
			idx++;
			if (idx > raw->curlen)
				goto fail_len;
			memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16);
			idx += 16;
			if (idx > raw->curlen)
				goto fail_len;
			repmsg->u.link_addr.ports[i].num_sdp_streams = (raw->msg[idx] >> 4) & 0xf;
			repmsg->u.link_addr.ports[i].num_sdp_stream_sinks = (raw->msg[idx] & 0xf);
			idx++;

		}
		if (idx > raw->curlen)
			goto fail_len;
	}

	return true;
fail_len:
	DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx *raw,
						   struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx];
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(repmsg->u.remote_dpcd_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_dpcd_read_ack.num_bytes);
	return true;
fail_len:
	DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_dpcd_write(struct drm_dp_sideband_msg_rx *raw,
						      struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_i2c_read_ack(struct drm_dp_sideband_msg_rx *raw,
						      struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.remote_i2c_read_ack.port_number = (raw->msg[idx] & 0xf);
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.remote_i2c_read_ack.num_bytes = raw->msg[idx];
	idx++;
	/* TODO check */
	memcpy(repmsg->u.remote_i2c_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_i2c_read_ack.num_bytes);
	return true;
fail_len:
	DRM_DEBUG_KMS("remote i2c reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband_msg_rx *raw,
							  struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.path_resources.full_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.path_resources.avail_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("enum resource parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_allocate_payload_ack(struct drm_dp_sideband_msg_rx *raw,
							  struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.allocate_payload.vcpi = raw->msg[idx];
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.allocate_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("allocate payload parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_query_payload_ack(struct drm_dp_sideband_msg_rx *raw,
						    struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.query_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("query payload parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
					struct drm_dp_sideband_msg_reply_body *msg)
{
	memset(msg, 0, sizeof(*msg));
	msg->reply_type = (raw->msg[0] & 0x80) >> 7;
	msg->req_type = (raw->msg[0] & 0x7f);

	if (msg->reply_type) {
		memcpy(msg->u.nak.guid, &raw->msg[1], 16);
		msg->u.nak.reason = raw->msg[17];
		msg->u.nak.nak_data = raw->msg[18];
		return false;
	}

	switch (msg->req_type) {
	case DP_LINK_ADDRESS:
		return drm_dp_sideband_parse_link_address(raw, msg);
	case DP_QUERY_PAYLOAD:
		return drm_dp_sideband_parse_query_payload_ack(raw, msg);
	case DP_REMOTE_DPCD_READ:
		return drm_dp_sideband_parse_remote_dpcd_read(raw, msg);
	case DP_REMOTE_DPCD_WRITE:
		return drm_dp_sideband_parse_remote_dpcd_write(raw, msg);
	case DP_REMOTE_I2C_READ:
		return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg);
	case DP_ENUM_PATH_RESOURCES:
		return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);
	case DP_ALLOCATE_PAYLOAD:
		return drm_dp_sideband_parse_allocate_payload_ack(raw, msg);
	default:
		DRM_ERROR("Got unknown reply 0x%02x\n", msg->req_type);
		return false;
	}
}

static bool drm_dp_sideband_parse_connection_status_notify(struct drm_dp_sideband_msg_rx *raw,
							   struct drm_dp_sideband_msg_req_body *msg)
{
	int idx = 1;

	msg->u.conn_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16);
	idx += 16;
	if (idx > raw->curlen)
		goto fail_len;

	msg->u.conn_stat.legacy_device_plug_status = (raw->msg[idx] >> 6) & 0x1;
	msg->u.conn_stat.displayport_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
	msg->u.conn_stat.message_capability_status = (raw->msg[idx] >> 4) & 0x1;
	msg->u.conn_stat.input_port = (raw->msg[idx] >> 3) & 0x1;
	msg->u.conn_stat.peer_device_type = (raw->msg[idx] & 0x7);
	idx++;
	return true;
fail_len:
	DRM_DEBUG_KMS("connection status reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_resource_status_notify(struct drm_dp_sideband_msg_rx *raw,
							   struct drm_dp_sideband_msg_req_body *msg)
{
	int idx = 1;

	msg->u.resource_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16);
	idx += 16;
	if (idx > raw->curlen)
		goto fail_len;

	msg->u.resource_stat.available_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx++;
	return true;
fail_len:
	DRM_DEBUG_KMS("resource status reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_req(struct drm_dp_sideband_msg_rx *raw,
				      struct drm_dp_sideband_msg_req_body *msg)
{
	memset(msg, 0, sizeof(*msg));
	msg->req_type = (raw->msg[0] & 0x7f);

	switch (msg->req_type) {
	case DP_CONNECTION_STATUS_NOTIFY:
		return drm_dp_sideband_parse_connection_status_notify(raw, msg);
	case DP_RESOURCE_STATUS_NOTIFY:
		return drm_dp_sideband_parse_resource_status_notify(raw, msg);
	default:
		DRM_ERROR("Got unknown request 0x%02x\n", msg->req_type);
		return false;
	}
}

static int build_dpcd_write(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_REMOTE_DPCD_WRITE;
	req.u.dpcd_write.port_number = port_num;
	req.u.dpcd_write.dpcd_address = offset;
	req.u.dpcd_write.num_bytes = num_bytes;
	req.u.dpcd_write.bytes = bytes;
	drm_dp_encode_sideband_req(&req, msg);

	return 0;
}

static int build_link_address(struct drm_dp_sideband_msg_tx *msg)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_LINK_ADDRESS;
	drm_dp_encode_sideband_req(&req, msg);
	return 0;
}

static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int port_num)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_ENUM_PATH_RESOURCES;
	req.u.port_num.port_number = port_num;
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
	return 0;
}

static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_num,
-				  u8 vcpi, uint16_t pbn)
+				  u8 vcpi, uint16_t pbn,
+				  u8 number_sdp_streams,
+				  u8 *sdp_stream_sink)
{
	struct drm_dp_sideband_msg_req_body req;
	memset(&req, 0, sizeof(req));
	req.req_type = DP_ALLOCATE_PAYLOAD;
	req.u.allocate_payload.port_number = port_num;
	req.u.allocate_payload.vcpi = vcpi;
	req.u.allocate_payload.pbn = pbn;
+	req.u.allocate_payload.number_sdp_streams = number_sdp_streams;
+	memcpy(req.u.allocate_payload.sdp_stream_sink, sdp_stream_sink,
+		   number_sdp_streams);
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
	return 0;
}
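/*
 * Rev 6937 widens build_allocate_payload() so the ALLOCATE_PAYLOAD request
 * can carry SDP stream sinks (audio streams riding the same VC payload);
 * the encoder above already knew how to pack them. A hypothetical updated
 * call site -- the pre-6937 one passed only the vcpi and pbn:
 *
 *	u8 sinks[DRM_DP_MAX_SDP_STREAMS] = { 0 };	// one stream, sink 0
 *
 *	build_allocate_payload(txmsg, port->port_num, port->vcpi.vcpi,
 *			       port->vcpi.pbn, 1, sinks);
 */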

static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
					struct drm_dp_vcpi *vcpi)
{
	int ret, vcpi_ret;

	mutex_lock(&mgr->payload_lock);
	ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1);
	if (ret > mgr->max_payloads) {
		ret = -EINVAL;
		DRM_DEBUG_KMS("out of payload ids %d\n", ret);
		goto out_unlock;
	}

	vcpi_ret = find_first_zero_bit(&mgr->vcpi_mask, mgr->max_payloads + 1);
	if (vcpi_ret > mgr->max_payloads) {
		ret = -EINVAL;
		DRM_DEBUG_KMS("out of vcpi ids %d\n", ret);
		goto out_unlock;
	}

	set_bit(ret, &mgr->payload_mask);
	set_bit(vcpi_ret, &mgr->vcpi_mask);
	vcpi->vcpi = vcpi_ret + 1;
	mgr->proposed_vcpis[ret - 1] = vcpi;
out_unlock:
	mutex_unlock(&mgr->payload_lock);
	return ret;
}
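/*
 * The function above is two tiny bitmap allocators. payload_mask has bit 0
 * pre-set at manager init (slot 0 is reserved), so find_first_zero_bit()
 * returns >= 1 and proposed_vcpis[] is indexed with ret - 1; vcpi_mask
 * starts empty and the result is offset by one instead (vcpi_ret + 1),
 * keeping VCPI 0 free to mean "unallocated". Assuming that init behaviour,
 * the first two allocations on a fresh manager go:
 *
 *	payload_mask 0b0001, vcpi_mask 0b0000 -> ret = 1, vcpi->vcpi = 1
 *	payload_mask 0b0011, vcpi_mask 0b0001 -> ret = 2, vcpi->vcpi = 2
 */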

static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr,
				      int vcpi)
{
	int i;
	if (vcpi == 0)
		return;

	mutex_lock(&mgr->payload_lock);
	DRM_DEBUG_KMS("putting payload %d\n", vcpi);
	clear_bit(vcpi - 1, &mgr->vcpi_mask);

	for (i = 0; i < mgr->max_payloads; i++) {
		if (mgr->proposed_vcpis[i])
			if (mgr->proposed_vcpis[i]->vcpi == vcpi) {
				mgr->proposed_vcpis[i] = NULL;
				clear_bit(i + 1, &mgr->payload_mask);
			}
	}
	mutex_unlock(&mgr->payload_lock);
}

static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
			      struct drm_dp_sideband_msg_tx *txmsg)
{
	bool ret;

	/*
	 * All updates to txmsg->state are protected by mgr->qlock, and the two
	 * cases we check here are terminal states. For those the barriers
	 * provided by the wake_up/wait_event pair are enough.
	 */
	ret = (txmsg->state == DRM_DP_SIDEBAND_TX_RX ||
	       txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT);
	return ret;
}

static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
				    struct drm_dp_sideband_msg_tx *txmsg)
{
	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
	int ret;

	ret = wait_event_timeout(mgr->tx_waitq,
				 check_txmsg_state(mgr, txmsg),
				 (4 * HZ));
	mutex_lock(&mstb->mgr->qlock);
	if (ret > 0) {
		if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) {
			ret = -EIO;
			goto out;
		}
	} else {
		DRM_DEBUG_KMS("timedout msg send %p %d %d\n", txmsg, txmsg->state, txmsg->seqno);

		/* dump some state */
		ret = -EIO;

		/* remove from q */
		if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED ||
		    txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND) {
			list_del(&txmsg->next);
		}

		if (txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
		    txmsg->state == DRM_DP_SIDEBAND_TX_SENT) {
			mstb->tx_slots[txmsg->seqno] = NULL;
		}
	}
out:
	mutex_unlock(&mgr->qlock);

	return ret;
}

static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
{
	struct drm_dp_mst_branch *mstb;

	mstb = kzalloc(sizeof(*mstb), GFP_KERNEL);
	if (!mstb)
		return NULL;

	mstb->lct = lct;
	if (lct > 1)
		memcpy(mstb->rad, rad, lct / 2);
	INIT_LIST_HEAD(&mstb->ports);
	kref_init(&mstb->kref);
	return mstb;
}
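/*
 * The lct / 2 copy above matches the header packing: lct counts links from
 * the source (1 = directly attached), and the RAD stores one 4-bit port
 * number for each hop past the first, two to a byte, so lct / 2 bytes is
 * exactly ceil((lct - 1) / 2). A branch three links deep (lct = 3), for
 * example, carries its two port numbers in a single RAD byte.
 */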

static void drm_dp_free_mst_port(struct kref *kref);

static void drm_dp_free_mst_branch_device(struct kref *kref)
{
	struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
	if (mstb->port_parent) {
		if (list_empty(&mstb->port_parent->next))
			kref_put(&mstb->port_parent->kref, drm_dp_free_mst_port);
	}
	kfree(mstb);
}

static void drm_dp_destroy_mst_branch_device(struct kref *kref)
{
	struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
	struct drm_dp_mst_port *port, *tmp;
	bool wake_tx = false;

	/*
	 * init kref again to be used by ports to remove mst branch when it is
	 * not needed anymore
	 */
	kref_init(kref);

	if (mstb->port_parent && list_empty(&mstb->port_parent->next))
		kref_get(&mstb->port_parent->kref);

	/*
	 * destroy all ports - don't need lock
	 * as there are no more references to the mst branch
	 * device at this point.
	 */
	list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
		list_del(&port->next);
		drm_dp_put_port(port);
	}

	/* drop any tx slots msg */
	mutex_lock(&mstb->mgr->qlock);
	if (mstb->tx_slots[0]) {
		mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
		mstb->tx_slots[0] = NULL;
		wake_tx = true;
	}
	if (mstb->tx_slots[1]) {
		mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
		mstb->tx_slots[1] = NULL;
		wake_tx = true;
	}
	mutex_unlock(&mstb->mgr->qlock);

//   if (wake_tx)
//       wake_up(&mstb->mgr->tx_waitq);
	kref_put(kref, drm_dp_free_mst_branch_device);
}

static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb)
{
	kref_put(&mstb->kref, drm_dp_destroy_mst_branch_device);
}
869
 
870
 
870
 
871
 
871
static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt)
{
	struct drm_dp_mst_branch *mstb;

	switch (old_pdt) {
	case DP_PEER_DEVICE_DP_LEGACY_CONV:
	case DP_PEER_DEVICE_SST_SINK:
		/* remove i2c over sideband */
		drm_dp_mst_unregister_i2c_bus(&port->aux);
		break;
	case DP_PEER_DEVICE_MST_BRANCHING:
		mstb = port->mstb;
		port->mstb = NULL;
		drm_dp_put_mst_branch_device(mstb);
		break;
	}
}

static void drm_dp_destroy_port(struct kref *kref)
{
	struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;

	if (!port->input) {
		port->vcpi.num_slots = 0;

		kfree(port->cached_edid);

		/*
		 * The only time we don't have a connector
		 * on an output port is if the connector init
		 * fails.
		 */
		if (port->connector) {
			/* we can't destroy the connector here, as
			 * we might be holding the mode_config.mutex
			 * from an EDID retrieval */

			mutex_lock(&mgr->destroy_connector_lock);
			kref_get(&port->parent->kref);
			list_add(&port->next, &mgr->destroy_connector_list);
			mutex_unlock(&mgr->destroy_connector_lock);
//		schedule_work(&mgr->destroy_connector_work);
			return;
		}
		/* no need to clean up vcpi
		 * as if we have no connector we never set up a vcpi */
		drm_dp_port_teardown_pdt(port, port->pdt);
-		port->pdt = DP_PEER_DEVICE_NONE;
	}
	kfree(port);
}

static void drm_dp_put_port(struct drm_dp_mst_port *port)
{
	kref_put(&port->kref, drm_dp_destroy_port);
}

static struct drm_dp_mst_branch *drm_dp_mst_get_validated_mstb_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_branch *to_find)
{
	struct drm_dp_mst_port *port;
	struct drm_dp_mst_branch *rmstb;
	if (to_find == mstb) {
		kref_get(&mstb->kref);
		return mstb;
	}
	list_for_each_entry(port, &mstb->ports, next) {
		if (port->mstb) {
			rmstb = drm_dp_mst_get_validated_mstb_ref_locked(port->mstb, to_find);
			if (rmstb)
				return rmstb;
		}
	}
	return NULL;
}

static struct drm_dp_mst_branch *drm_dp_get_validated_mstb_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_branch *mstb)
{
	struct drm_dp_mst_branch *rmstb = NULL;
	mutex_lock(&mgr->lock);
	if (mgr->mst_primary)
		rmstb = drm_dp_mst_get_validated_mstb_ref_locked(mgr->mst_primary, mstb);
	mutex_unlock(&mgr->lock);
	return rmstb;
}

static struct drm_dp_mst_port *drm_dp_mst_get_port_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_port *to_find)
{
	struct drm_dp_mst_port *port, *mport;

	list_for_each_entry(port, &mstb->ports, next) {
		if (port == to_find) {
			kref_get(&port->kref);
			return port;
		}
		if (port->mstb) {
			mport = drm_dp_mst_get_port_ref_locked(port->mstb, to_find);
			if (mport)
				return mport;
		}
	}
	return NULL;
}

static struct drm_dp_mst_port *drm_dp_get_validated_port_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
{
	struct drm_dp_mst_port *rport = NULL;
	mutex_lock(&mgr->lock);
	if (mgr->mst_primary)
		rport = drm_dp_mst_get_port_ref_locked(mgr->mst_primary, port);
	mutex_unlock(&mgr->lock);
	return rport;
}

static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num)
{
	struct drm_dp_mst_port *port;

	list_for_each_entry(port, &mstb->ports, next) {
		if (port->port_num == port_num) {
			kref_get(&port->kref);
			return port;
		}
	}

	return NULL;
}

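/*
 * The *_get_validated_* helpers above never dereference the pointer
 * they are handed: a port or branch may have been torn down by a
 * hotplug since the caller last saw it, so the pointer is only
 * trusted once it has been found again by walking down from
 * mst_primary under &mgr->lock, at which point a kref is taken.
 */
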
/*
 * calculate a new RAD for this MST branch device
 * if parent has an LCT of 2 then it has 1 nibble of RAD,
 * if parent has an LCT of 3 then it has 2 nibbles of RAD,
 */
static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
			       u8 *rad)
{
	int parent_lct = port->parent->lct;
	int shift = 4;
	int idx = (parent_lct - 1) / 2;
	if (parent_lct > 1) {
		memcpy(rad, port->parent->rad, idx + 1);
		shift = (parent_lct % 2) ? 4 : 0;
	} else
		rad[0] = 0;

	rad[idx] |= port->port_num << shift;
	return parent_lct + 1;
}

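/*
 * Worked example of the packing above: a branch on port 1 of the
 * primary gets lct 2 and rad[0] = 0x10 (the hop in the high nibble);
 * a branch behind it on port 8 gets lct 3 and rad[0] = 0x18 (second
 * hop in the low nibble), with a further RAD byte consumed for every
 * two additional hops.
 */
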
/*
 * returns true if the caller should send a link address request to the
 * new mstb
 */
static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port)
{
	int ret;
	u8 rad[6], lct;
	bool send_link = false;
	switch (port->pdt) {
	case DP_PEER_DEVICE_DP_LEGACY_CONV:
	case DP_PEER_DEVICE_SST_SINK:
		/* add i2c over sideband */
		ret = drm_dp_mst_register_i2c_bus(&port->aux);
		break;
	case DP_PEER_DEVICE_MST_BRANCHING:
		lct = drm_dp_calculate_rad(port, rad);

		port->mstb = drm_dp_add_mst_branch_device(lct, rad);
		port->mstb->mgr = port->mgr;
		port->mstb->port_parent = port;

		send_link = true;
		break;
	}
	return send_link;
}

static void drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
{
	int ret;

	memcpy(mstb->guid, guid, 16);

	if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
		if (mstb->port_parent) {
			ret = drm_dp_send_dpcd_write(
					mstb->mgr,
					mstb->port_parent,
					DP_GUID,
					16,
					mstb->guid);
		} else {

			ret = drm_dp_dpcd_write(
					mstb->mgr->aux,
					DP_GUID,
					mstb->guid,
					16);
		}
	}
}

static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
				int pnum,
				char *proppath,
				size_t proppath_size)
{
	int i;
	char temp[8];
	snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
	for (i = 0; i < (mstb->lct - 1); i++) {
		int shift = (i % 2) ? 0 : 4;
		int port_num = (mstb->rad[i / 2] >> shift) & 0xf;
		snprintf(temp, sizeof(temp), "-%d", port_num);
		strlcat(proppath, temp, proppath_size);
	}
	snprintf(temp, sizeof(temp), "-%d", pnum);
	strlcat(proppath, temp, proppath_size);
}

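/*
 * Example output: with conn_base_id 20, a sink on port 2 of a branch
 * reached through port 1 of the primary yields "mst:20-1-2"; the RAD
 * nibbles are read high-then-low, matching drm_dp_calculate_rad().
 */
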
static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
			    struct device *dev,
			    struct drm_dp_link_addr_reply_port *port_msg)
{
	struct drm_dp_mst_port *port;
	bool ret;
	bool created = false;
	int old_pdt = 0;
	int old_ddps = 0;
	port = drm_dp_get_port(mstb, port_msg->port_number);
	if (!port) {
		port = kzalloc(sizeof(*port), GFP_KERNEL);
		if (!port)
			return;
		kref_init(&port->kref);
		port->parent = mstb;
		port->port_num = port_msg->port_number;
		port->mgr = mstb->mgr;
		port->aux.name = "DPMST";
		port->aux.dev = dev;
		created = true;
	} else {
		old_pdt = port->pdt;
		old_ddps = port->ddps;
	}

	port->pdt = port_msg->peer_device_type;
	port->input = port_msg->input_port;
	port->mcs = port_msg->mcs;
	port->ddps = port_msg->ddps;
	port->ldps = port_msg->legacy_device_plug_status;
	port->dpcd_rev = port_msg->dpcd_revision;
	port->num_sdp_streams = port_msg->num_sdp_streams;
	port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;

	/* manage mstb port lists with mgr lock - take a reference
	   for this list */
	if (created) {
		mutex_lock(&mstb->mgr->lock);
		kref_get(&port->kref);
		list_add(&port->next, &mstb->ports);
		mutex_unlock(&mstb->mgr->lock);
	}

	if (old_ddps != port->ddps) {
		if (port->ddps) {
			if (!port->input)
				drm_dp_send_enum_path_resources(mstb->mgr, mstb, port);
		} else {
			port->available_pbn = 0;
		}
	}

	if (old_pdt != port->pdt && !port->input) {
		drm_dp_port_teardown_pdt(port, old_pdt);

		ret = drm_dp_port_setup_pdt(port);
		if (ret == true)
			drm_dp_send_link_address(mstb->mgr, port->mstb);
	}

	if (created && !port->input) {
		char proppath[255];

		build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath));
		port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath);
		if (!port->connector) {
			/* remove it from the port list */
			mutex_lock(&mstb->mgr->lock);
			list_del(&port->next);
			mutex_unlock(&mstb->mgr->lock);
			/* drop port list reference */
			drm_dp_put_port(port);
			goto out;
		}
-		if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
-		     port->pdt == DP_PEER_DEVICE_SST_SINK) &&
-		    port->port_num >= DP_MST_LOGICAL_PORT_0) {
+		if (port->port_num >= DP_MST_LOGICAL_PORT_0) {
			port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
			drm_mode_connector_set_tile_property(port->connector);
		}
		(*mstb->mgr->cbs->register_connector)(port->connector);
	}

out:
	/* put reference to this port */
	drm_dp_put_port(port);
}

static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
			       struct drm_dp_connection_status_notify *conn_stat)
{
	struct drm_dp_mst_port *port;
	int old_pdt;
	int old_ddps;
	bool dowork = false;
	port = drm_dp_get_port(mstb, conn_stat->port_number);
	if (!port)
		return;

	old_ddps = port->ddps;
	old_pdt = port->pdt;
	port->pdt = conn_stat->peer_device_type;
	port->mcs = conn_stat->message_capability_status;
	port->ldps = conn_stat->legacy_device_plug_status;
	port->ddps = conn_stat->displayport_device_plug_status;

	if (old_ddps != port->ddps) {
		if (port->ddps) {
			dowork = true;
		} else {
			port->available_pbn = 0;
		}
	}
	if (old_pdt != port->pdt && !port->input) {
		drm_dp_port_teardown_pdt(port, old_pdt);

		if (drm_dp_port_setup_pdt(port))
			dowork = true;
	}

	drm_dp_put_port(port);
//   if (dowork)
//       queue_work(system_long_wq, &mstb->mgr->work);

}

static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr,
							       u8 lct, u8 *rad)
{
	struct drm_dp_mst_branch *mstb;
	struct drm_dp_mst_port *port;
	int i;
	/* find the port by iterating down */

	mutex_lock(&mgr->lock);
	mstb = mgr->mst_primary;

	for (i = 0; i < lct - 1; i++) {
		int shift = (i % 2) ? 0 : 4;
		int port_num = (rad[i / 2] >> shift) & 0xf;

		list_for_each_entry(port, &mstb->ports, next) {
			if (port->port_num == port_num) {
				mstb = port->mstb;
				if (!mstb) {
					DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x\n", lct, rad[0]);
					goto out;
				}

				break;
			}
		}
	}
	kref_get(&mstb->kref);
out:
	mutex_unlock(&mgr->lock);
	return mstb;
}

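/*
 * The walk above consumes the RAD one nibble per hop, high nibble
 * first, mirroring drm_dp_calculate_rad(), so an (lct, rad) pair
 * pulled from a sideband header resolves back to the branch device
 * that sent it.
 */
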
static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
	struct drm_dp_mst_branch *mstb,
	uint8_t *guid)
{
	struct drm_dp_mst_branch *found_mstb;
	struct drm_dp_mst_port *port;

	if (memcmp(mstb->guid, guid, 16) == 0)
		return mstb;


	list_for_each_entry(port, &mstb->ports, next) {
		if (!port->mstb)
			continue;

		found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);

		if (found_mstb)
			return found_mstb;
	}

	return NULL;
}

static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device_by_guid(
	struct drm_dp_mst_topology_mgr *mgr,
	uint8_t *guid)
{
	struct drm_dp_mst_branch *mstb;

	/* find the port by iterating down */
	mutex_lock(&mgr->lock);

	mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);

	if (mstb)
		kref_get(&mstb->kref);

	mutex_unlock(&mgr->lock);
	return mstb;
}

static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
					       struct drm_dp_mst_branch *mstb)
{
	struct drm_dp_mst_port *port;
	struct drm_dp_mst_branch *mstb_child;
	if (!mstb->link_address_sent)
		drm_dp_send_link_address(mgr, mstb);

	list_for_each_entry(port, &mstb->ports, next) {
		if (port->input)
			continue;

		if (!port->ddps)
			continue;

		if (!port->available_pbn)
			drm_dp_send_enum_path_resources(mgr, mstb, port);

		if (port->mstb) {
			mstb_child = drm_dp_get_validated_mstb_ref(mgr, port->mstb);
			if (mstb_child) {
				drm_dp_check_and_send_link_address(mgr, mstb_child);
				drm_dp_put_mst_branch_device(mstb_child);
			}
		}
	}
}

static void drm_dp_mst_link_probe_work(struct work_struct *work)
{
	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, work);
	struct drm_dp_mst_branch *mstb;

	mutex_lock(&mgr->lock);
	mstb = mgr->mst_primary;
	if (mstb) {
		kref_get(&mstb->kref);
	}
	mutex_unlock(&mgr->lock);
	if (mstb) {
		drm_dp_check_and_send_link_address(mgr, mstb);
		drm_dp_put_mst_branch_device(mstb);
	}
}

static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
				 u8 *guid)
{
	static u8 zero_guid[16];

	if (!memcmp(guid, zero_guid, 16)) {
		u64 salt = get_jiffies_64();
		memcpy(&guid[0], &salt, sizeof(u64));
		memcpy(&guid[8], &salt, sizeof(u64));
		return false;
	}
	return true;
}

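/*
 * An unprogrammed branch reports an all-zero GUID; in that case a
 * GUID is fabricated from the jiffies counter (the same 8-byte salt
 * in both halves) and false is returned so the caller writes the new
 * GUID back to the device at DP_GUID, as drm_dp_check_mstb_guid()
 * above does.
 */
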
#if 0
static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_REMOTE_DPCD_READ;
	req.u.dpcd_read.port_number = port_num;
	req.u.dpcd_read.dpcd_address = offset;
	req.u.dpcd_read.num_bytes = num_bytes;
	drm_dp_encode_sideband_req(&req, msg);

	return 0;
}
#endif

static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
				    bool up, u8 *msg, int len)
{
	int ret;
	int regbase = up ? DP_SIDEBAND_MSG_UP_REP_BASE : DP_SIDEBAND_MSG_DOWN_REQ_BASE;
	int tosend, total, offset;
	int retries = 0;

retry:
	total = len;
	offset = 0;
	do {
		tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total);

		ret = drm_dp_dpcd_write(mgr->aux, regbase + offset,
					&msg[offset],
					tosend);
		if (ret != tosend) {
			if (ret == -EIO && retries < 5) {
				retries++;
				goto retry;
			}
			DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret);

			return -EIO;
		}
		offset += tosend;
		total -= tosend;
	} while (total > 0);
	return 0;
}

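/*
 * Sideband messages are pushed through the DOWN_REQ/UP_REP DPCD
 * windows in bursts of at most 16 bytes (and never more than the
 * device's max_dpcd_transaction_bytes).  Note that the -EIO retry
 * path above restarts the whole message from offset 0 rather than
 * resuming where the failed write left off.
 */
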
static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
				  struct drm_dp_sideband_msg_tx *txmsg)
{
	struct drm_dp_mst_branch *mstb = txmsg->dst;
	u8 req_type;

	/* both msg slots are full */
	if (txmsg->seqno == -1) {
		if (mstb->tx_slots[0] && mstb->tx_slots[1]) {
			DRM_DEBUG_KMS("%s: failed to find slot\n", __func__);
			return -EAGAIN;
		}
		if (mstb->tx_slots[0] == NULL && mstb->tx_slots[1] == NULL) {
			txmsg->seqno = mstb->last_seqno;
			mstb->last_seqno ^= 1;
		} else if (mstb->tx_slots[0] == NULL)
			txmsg->seqno = 0;
		else
			txmsg->seqno = 1;
		mstb->tx_slots[txmsg->seqno] = txmsg;
	}

	req_type = txmsg->msg[0] & 0x7f;
	if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
	    req_type == DP_RESOURCE_STATUS_NOTIFY)
		hdr->broadcast = 1;
	else
		hdr->broadcast = 0;
	hdr->path_msg = txmsg->path_msg;
	hdr->lct = mstb->lct;
	hdr->lcr = mstb->lct - 1;
	if (mstb->lct > 1)
		memcpy(hdr->rad, mstb->rad, mstb->lct / 2);
	hdr->seqno = txmsg->seqno;
	return 0;
}
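
/*
 * A branch device can have at most two sideband transactions in
 * flight, identified by seqno 0/1 and parked in tx_slots[]; when both
 * slots are free the choice alternates via last_seqno so that replies
 * can be matched back to the request that owns the slot.
 */
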
/*
 * process a single block of the next message in the sideband queue
 */
static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
				   struct drm_dp_sideband_msg_tx *txmsg,
				   bool up)
{
	u8 chunk[48];
	struct drm_dp_sideband_msg_hdr hdr;
	int len, space, idx, tosend;
	int ret;

	memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr));

	if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED) {
		txmsg->seqno = -1;
		txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND;
	}

	/* make hdr from dst mst - for replies use seqno
	   otherwise assign one */
	ret = set_hdr_from_dst_qlock(&hdr, txmsg);
	if (ret < 0)
		return ret;

	/* amount left to send in this message */
	len = txmsg->cur_len - txmsg->cur_offset;

	/* 48 bytes per sideband msg, minus 1 byte for the data CRC and
	   however many bytes the header takes */
	space = 48 - 1 - drm_dp_calc_sb_hdr_size(&hdr);

	tosend = min(len, space);
	if (len == txmsg->cur_len)
		hdr.somt = 1;
	if (space >= len)
		hdr.eomt = 1;


	hdr.msg_len = tosend + 1;
	drm_dp_encode_sideband_msg_hdr(&hdr, chunk, &idx);
	memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend);
	/* add crc at end */
	drm_dp_crc_sideband_chunk_req(&chunk[idx], tosend);
	idx += tosend + 1;

	ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
	if (ret) {
		DRM_DEBUG_KMS("sideband msg failed to send\n");
		return ret;
	}

	txmsg->cur_offset += tosend;
	if (txmsg->cur_offset == txmsg->cur_len) {
		txmsg->state = DRM_DP_SIDEBAND_TX_SENT;
		return 1;
	}
	return 0;
}

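/*
 * Chunk arithmetic, assuming the 3-byte base header plus one RAD byte
 * per two hops that drm_dp_calc_sb_hdr_size() encodes: for an lct 1
 * destination the header is 3 bytes, so one 48-byte chunk carries up
 * to 44 message bytes plus the trailing CRC byte.  somt marks the
 * first chunk of a message, eomt the last.
 */
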
static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
{
	struct drm_dp_sideband_msg_tx *txmsg;
	int ret;

	WARN_ON(!mutex_is_locked(&mgr->qlock));

	/* construct a chunk from the first msg in the tx_msg queue */
	if (list_empty(&mgr->tx_msg_downq)) {
		mgr->tx_down_in_progress = false;
		return;
	}
	mgr->tx_down_in_progress = true;

	txmsg = list_first_entry(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next);
	ret = process_single_tx_qlock(mgr, txmsg, false);
	if (ret == 1) {
		/* txmsg is sent; it should be in the slots now */
		list_del(&txmsg->next);
	} else if (ret) {
		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
		list_del(&txmsg->next);
		if (txmsg->seqno != -1)
			txmsg->dst->tx_slots[txmsg->seqno] = NULL;
		txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
//       wake_up(&mgr->tx_waitq);
	}
	if (list_empty(&mgr->tx_msg_downq)) {
		mgr->tx_down_in_progress = false;
		return;
	}
}

/* called holding qlock */
static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
				       struct drm_dp_sideband_msg_tx *txmsg)
{
	int ret;

	/* construct a chunk from the first msg in the tx_msg queue */
	ret = process_single_tx_qlock(mgr, txmsg, true);

	if (ret != 1)
		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);

	txmsg->dst->tx_slots[txmsg->seqno] = NULL;
}

static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_sideband_msg_tx *txmsg)
{
	mutex_lock(&mgr->qlock);
	list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
	if (!mgr->tx_down_in_progress)
		process_single_down_tx_qlock(mgr);
	mutex_unlock(&mgr->qlock);
}

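/*
 * A new down request only kicks transmission when the queue was idle;
 * otherwise it waits its turn in tx_msg_downq.  As elsewhere in this
 * port the tx_waitq wake-ups stay commented out, so completion is
 * presumably detected by drm_dp_mst_wait_tx_reply() without them.
 */
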
static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
				     struct drm_dp_mst_branch *mstb)
{
	int len;
	struct drm_dp_sideband_msg_tx *txmsg;
	int ret;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg)
		return;

	txmsg->dst = mstb;
	len = build_link_address(txmsg);

	mstb->link_address_sent = true;
	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
		int i;

		if (txmsg->reply.reply_type == 1)
			DRM_DEBUG_KMS("link address nak received\n");
		else {
			DRM_DEBUG_KMS("link address reply: %d\n", txmsg->reply.u.link_addr.nports);
			for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
				DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n", i,
				       txmsg->reply.u.link_addr.ports[i].input_port,
				       txmsg->reply.u.link_addr.ports[i].peer_device_type,
				       txmsg->reply.u.link_addr.ports[i].port_number,
				       txmsg->reply.u.link_addr.ports[i].dpcd_revision,
				       txmsg->reply.u.link_addr.ports[i].mcs,
				       txmsg->reply.u.link_addr.ports[i].ddps,
				       txmsg->reply.u.link_addr.ports[i].legacy_device_plug_status,
				       txmsg->reply.u.link_addr.ports[i].num_sdp_streams,
				       txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks);
			}

			drm_dp_check_mstb_guid(mstb, txmsg->reply.u.link_addr.guid);

			for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
				drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]);
			}
			(*mgr->cbs->hotplug)(mgr);
		}
	} else {
		mstb->link_address_sent = false;
		DRM_DEBUG_KMS("link address failed %d\n", ret);
	}

	kfree(txmsg);
}

static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
					   struct drm_dp_mst_branch *mstb,
					   struct drm_dp_mst_port *port)
{
	int len;
	struct drm_dp_sideband_msg_tx *txmsg;
	int ret;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg)
		return -ENOMEM;

	txmsg->dst = mstb;
	len = build_enum_path_resources(txmsg, port->port_num);

	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
		if (txmsg->reply.reply_type == 1)
			DRM_DEBUG_KMS("enum path resources nak received\n");
		else {
			if (port->port_num != txmsg->reply.u.path_resources.port_number)
				DRM_ERROR("got incorrect port in response\n");
			DRM_DEBUG_KMS("enum path resources %d: %d %d\n", txmsg->reply.u.path_resources.port_number, txmsg->reply.u.path_resources.full_payload_bw_number,
			       txmsg->reply.u.path_resources.avail_payload_bw_number);
			port->available_pbn = txmsg->reply.u.path_resources.avail_payload_bw_number;
		}
	}

	kfree(txmsg);
	return 0;
}

static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb)
{
	if (!mstb->port_parent)
		return NULL;

	if (mstb->port_parent->mstb != mstb)
		return mstb->port_parent;

	return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent);
}

static struct drm_dp_mst_branch *drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
									 struct drm_dp_mst_branch *mstb,
									 int *port_num)
{
	struct drm_dp_mst_branch *rmstb = NULL;
	struct drm_dp_mst_port *found_port;
	mutex_lock(&mgr->lock);
	if (mgr->mst_primary) {
		found_port = drm_dp_get_last_connected_port_to_mstb(mstb);

		if (found_port) {
			rmstb = found_port->parent;
			kref_get(&rmstb->kref);
			*port_num = found_port->port_num;
		}
	}
	mutex_unlock(&mgr->lock);
	return rmstb;
}

static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
				   struct drm_dp_mst_port *port,
				   int id,
				   int pbn)
{
	struct drm_dp_sideband_msg_tx *txmsg;
	struct drm_dp_mst_branch *mstb;
	int len, ret, port_num;
+	u8 sinks[DRM_DP_MAX_SDP_STREAMS];
+	int i;

	port = drm_dp_get_validated_port_ref(mgr, port);
	if (!port)
		return -EINVAL;

	port_num = port->port_num;
	mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
	if (!mstb) {
		mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num);

		if (!mstb) {
			drm_dp_put_port(port);
			return -EINVAL;
		}
	}

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg) {
		ret = -ENOMEM;
		goto fail_put;
	}
+
+	for (i = 0; i < port->num_sdp_streams; i++)
+		sinks[i] = i;

	txmsg->dst = mstb;
	len = build_allocate_payload(txmsg, port_num,
				     id,
-				     pbn);
+				     pbn, port->num_sdp_streams, sinks);

	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
		if (txmsg->reply.reply_type == 1) {
			ret = -EINVAL;
		} else
			ret = 0;
	}
	kfree(txmsg);
fail_put:
	drm_dp_put_mst_branch_device(mstb);
	drm_dp_put_port(port);
	return ret;
}

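/*
 * An ALLOCATE_PAYLOAD request with a PBN of 0 releases the allocation
 * again (per the DP 1.2 MST scheme), which is how
 * drm_dp_destroy_payload_step1() below tears a payload down over
 * sideband before clearing the local table.
 */
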
static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
				       int id,
				       struct drm_dp_payload *payload)
{
	int ret;

	ret = drm_dp_dpcd_write_payload(mgr, id, payload);
	if (ret < 0) {
		payload->payload_state = 0;
		return ret;
	}
	payload->payload_state = DP_PAYLOAD_LOCAL;
	return 0;
}

static int drm_dp_create_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
				       struct drm_dp_mst_port *port,
				       int id,
				       struct drm_dp_payload *payload)
{
	int ret;
	ret = drm_dp_payload_send_msg(mgr, port, id, port->vcpi.pbn);
	if (ret < 0)
		return ret;
	payload->payload_state = DP_PAYLOAD_REMOTE;
	return ret;
}

static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
					struct drm_dp_mst_port *port,
					int id,
					struct drm_dp_payload *payload)
{
	DRM_DEBUG_KMS("\n");
	/* it's okay for these to fail */
	if (port) {
		drm_dp_payload_send_msg(mgr, port, id, 0);
	}

	drm_dp_dpcd_write_payload(mgr, id, payload);
	payload->payload_state = DP_PAYLOAD_DELETE_LOCAL;
	return 0;
}

static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
					int id,
					struct drm_dp_payload *payload)
{
	payload->payload_state = 0;
	return 0;
}

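/*
 * Slot arithmetic sketch, assuming slot 0 of the 64-slot VC payload
 * table is reserved for the MTP header as in DP 1.2: cur_slots starts
 * at 1, so two streams needing 10 and 6 slots are placed at
 * start_slot 1 and 11 respectively, leaving slots 17..63 unused.
 */
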
/**
 * drm_dp_update_payload_part1() - Execute payload update part 1
 * @mgr: manager to use.
 *
 * This iterates over all proposed virtual channels, and tries to
 * allocate space in the link for them. For 0->slots transitions,
 * this step just writes the VCPI to the MST device. For slots->0
 * transitions, this writes the updated VCPIs and removes the
 * remote VC payloads.
 *
 * After calling this the driver should generate ACT and payload
 * packets.
 */
int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
{
	int i, j;
	int cur_slots = 1;
	struct drm_dp_payload req_payload;
	struct drm_dp_mst_port *port;

	mutex_lock(&mgr->payload_lock);
	for (i = 0; i < mgr->max_payloads; i++) {
		/* solve the current payloads - compare to the hw ones
		   - update the hw view */
		req_payload.start_slot = cur_slots;
		if (mgr->proposed_vcpis[i]) {
			port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
			port = drm_dp_get_validated_port_ref(mgr, port);
			if (!port) {
				mutex_unlock(&mgr->payload_lock);
				return -EINVAL;
			}
			req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots;
			req_payload.vcpi = mgr->proposed_vcpis[i]->vcpi;
		} else {
			port = NULL;
			req_payload.num_slots = 0;
		}

		if (mgr->payloads[i].start_slot != req_payload.start_slot) {
			mgr->payloads[i].start_slot = req_payload.start_slot;
		}
		/* work out what is required to happen with this payload */
		if (mgr->payloads[i].num_slots != req_payload.num_slots) {

			/* need to push an update for this payload */
			if (req_payload.num_slots) {
				drm_dp_create_payload_step1(mgr, mgr->proposed_vcpis[i]->vcpi, &req_payload);
				mgr->payloads[i].num_slots = req_payload.num_slots;
				mgr->payloads[i].vcpi = req_payload.vcpi;
			} else if (mgr->payloads[i].num_slots) {
				mgr->payloads[i].num_slots = 0;
				drm_dp_destroy_payload_step1(mgr, port, port->vcpi.vcpi, &mgr->payloads[i]);
				req_payload.payload_state = mgr->payloads[i].payload_state;
				mgr->payloads[i].start_slot = 0;
			}
			mgr->payloads[i].payload_state = req_payload.payload_state;
		}
		cur_slots += req_payload.num_slots;

		if (port)
			drm_dp_put_port(port);
	}

	for (i = 0; i < mgr->max_payloads; i++) {
		if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
			DRM_DEBUG_KMS("removing payload %d\n", i);
			for (j = i; j < mgr->max_payloads - 1; j++) {
				memcpy(&mgr->payloads[j], &mgr->payloads[j + 1], sizeof(struct drm_dp_payload));
				mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1];
				if (mgr->proposed_vcpis[j] && mgr->proposed_vcpis[j]->num_slots) {
					set_bit(j + 1, &mgr->payload_mask);
				} else {
					clear_bit(j + 1, &mgr->payload_mask);
				}
			}
			memset(&mgr->payloads[mgr->max_payloads - 1], 0, sizeof(struct drm_dp_payload));
			mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL;
			clear_bit(mgr->max_payloads, &mgr->payload_mask);

		}
	}
	mutex_unlock(&mgr->payload_lock);

	return 0;
}
EXPORT_SYMBOL(drm_dp_update_payload_part1);

/**
 * drm_dp_update_payload_part2() - Execute payload update part 2
 * @mgr: manager to use.
 *
 * This iterates over all proposed virtual channels, and tries to
 * allocate space in the link for them. For 0->slots transitions,
 * this step writes the remote VC payload commands. For slots->0
 * this just resets some internal state.
 */
int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr)
{
	struct drm_dp_mst_port *port;
	int i;
	int ret = 0;
	mutex_lock(&mgr->payload_lock);
	for (i = 0; i < mgr->max_payloads; i++) {

		if (!mgr->proposed_vcpis[i])
			continue;

		port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);

		DRM_DEBUG_KMS("payload %d %d\n", i, mgr->payloads[i].payload_state);
		if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) {
			ret = drm_dp_create_payload_step2(mgr, port, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
		} else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
			ret = drm_dp_destroy_payload_step2(mgr, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
		}
		if (ret) {
			mutex_unlock(&mgr->payload_lock);
			return ret;
		}
	}
	mutex_unlock(&mgr->payload_lock);
	return 0;
}
EXPORT_SYMBOL(drm_dp_update_payload_part2);

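/*
 * Illustrative sketch only (kept out of the build like the unused
 * helper below): the sequence a driver would typically follow around a
 * payload change, using only helpers defined in this file. The exact
 * point where the driver's hardware sends the ACT trigger sequence is
 * device specific and is assumed here.
 */
#if 0 /* example only */
static void example_commit_payloads(struct drm_dp_mst_topology_mgr *mgr)
{
	/* part 1: program the VCPI table in the branch device */
	drm_dp_update_payload_part1(mgr);

	/* the driver now makes its own hardware generate ACT, then
	 * waits for the sink to report ACT handled */
	drm_dp_check_act_status(mgr);

	/* part 2: send the remote ALLOCATE_PAYLOAD sideband messages */
	drm_dp_update_payload_part2(mgr);
}
#endif
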
#if 0 /* unused as of yet */
static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port,
				 int offset, int size)
{
	int len;
	struct drm_dp_sideband_msg_tx *txmsg;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg)
		return -ENOMEM;

	len = build_dpcd_read(txmsg, port->port_num, 0, 8);
	txmsg->dst = port->parent;

	drm_dp_queue_down_tx(mgr, txmsg);

	return 0;
}
#endif

static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
				  struct drm_dp_mst_port *port,
				  int offset, int size, u8 *bytes)
{
	int len;
	int ret;
	struct drm_dp_sideband_msg_tx *txmsg;
	struct drm_dp_mst_branch *mstb;

	mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
	if (!mstb)
		return -EINVAL;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg) {
		ret = -ENOMEM;
		goto fail_put;
	}

	len = build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
	txmsg->dst = mstb;

	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
		if (txmsg->reply.reply_type == 1) {
			ret = -EINVAL;
		} else
			ret = 0;
	}
	kfree(txmsg);
fail_put:
	drm_dp_put_mst_branch_device(mstb);
	return ret;
}

static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type)
{
	struct drm_dp_sideband_msg_reply_body reply;

	reply.reply_type = 0;
	reply.req_type = req_type;
	drm_dp_encode_sideband_reply(&reply, msg);
	return 0;
}

static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
				    struct drm_dp_mst_branch *mstb,
				    int req_type, int seqno, bool broadcast)
{
	struct drm_dp_sideband_msg_tx *txmsg;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg)
		return -ENOMEM;

	txmsg->dst = mstb;
	txmsg->seqno = seqno;
	drm_dp_encode_up_ack_reply(txmsg, req_type);

	mutex_lock(&mgr->qlock);

	process_single_up_tx_qlock(mgr, txmsg);

	mutex_unlock(&mgr->qlock);

	kfree(txmsg);
	return 0;
}

static bool drm_dp_get_vc_payload_bw(int dp_link_bw,
				     int dp_link_count,
				     int *out)
{
	switch (dp_link_bw) {
	default:
		DRM_DEBUG_KMS("invalid link bandwidth in DPCD: %x (link count: %d)\n",
			      dp_link_bw, dp_link_count);
		return false;

	case DP_LINK_BW_1_62:
		*out = 3 * dp_link_count;
		break;
	case DP_LINK_BW_2_7:
		*out = 5 * dp_link_count;
		break;
	case DP_LINK_BW_5_4:
		*out = 10 * dp_link_count;
		break;
	}
	return true;
}

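/*
 * Worked example (a sketch, not built): an HBR2 link (DP_LINK_BW_5_4)
 * trained at 4 lanes gives *out = 10 * 4 = 40, i.e. each usable MTP
 * timeslot carries 40 PBN of bandwidth.
 */
#if 0 /* example only */
static void example_vc_payload_bw(void)
{
	int pbn_per_slot;

	if (drm_dp_get_vc_payload_bw(DP_LINK_BW_5_4, 4, &pbn_per_slot))
		DRM_DEBUG_KMS("pbn per slot: %d\n", pbn_per_slot); /* 40 */
}
#endif
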
/**
 * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager
 * @mgr: manager to set state for
 * @mst_state: true to enable MST on this connector - false to disable.
 *
 * This is called by the driver when it detects an MST capable device plugged
 * into a DP MST capable port, or when a DP MST capable device is unplugged.
 */
int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
{
	int ret = 0;
	struct drm_dp_mst_branch *mstb = NULL;

	mutex_lock(&mgr->lock);
	if (mst_state == mgr->mst_state)
		goto out_unlock;

	mgr->mst_state = mst_state;
	/* set the device into MST mode */
	if (mst_state) {
		WARN_ON(mgr->mst_primary);

		/* get dpcd info */
		ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
		if (ret != DP_RECEIVER_CAP_SIZE) {
			DRM_DEBUG_KMS("failed to read DPCD\n");
			goto out_unlock;
		}

		if (!drm_dp_get_vc_payload_bw(mgr->dpcd[1],
					      mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK,
					      &mgr->pbn_div)) {
			ret = -EINVAL;
			goto out_unlock;
		}

		mgr->total_pbn = 2560;
		mgr->total_slots = DIV_ROUND_UP(mgr->total_pbn, mgr->pbn_div);
		mgr->avail_slots = mgr->total_slots;

		/* add initial branch device at LCT 1 */
		mstb = drm_dp_add_mst_branch_device(1, NULL);
		if (mstb == NULL) {
			ret = -ENOMEM;
			goto out_unlock;
		}
		mstb->mgr = mgr;

		/* give this the main reference */
		mgr->mst_primary = mstb;
		kref_get(&mgr->mst_primary->kref);

		ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
					 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
		if (ret < 0) {
			goto out_unlock;
		}

		{
			struct drm_dp_payload reset_pay;
			reset_pay.start_slot = 0;
			reset_pay.num_slots = 0x3f;
			drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
		}

//       queue_work(system_long_wq, &mgr->work);

		ret = 0;
	} else {
		/* disable MST on the device */
		mstb = mgr->mst_primary;
		mgr->mst_primary = NULL;
		/* this can fail if the device is gone */
		drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
		ret = 0;
		memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload));
		mgr->payload_mask = 0;
		set_bit(0, &mgr->payload_mask);
		mgr->vcpi_mask = 0;
	}

out_unlock:
	mutex_unlock(&mgr->lock);
	if (mstb)
		drm_dp_put_mst_branch_device(mstb);
	return ret;

}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst);

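/*
 * Sketch of a detect-path caller (example only): the "is the sink MST
 * capable" probe is assumed to have been done by the driver, e.g. via
 * the DPCD MSTM_CAP bit, before this point.
 */
#if 0 /* example only */
static void example_mst_detect(struct drm_dp_mst_topology_mgr *mgr,
			       bool sink_can_mst)
{
	/* calling with the current state is harmless - the helper
	 * bails out early when nothing changes */
	if (drm_dp_mst_topology_mgr_set_mst(mgr, sink_can_mst) < 0)
		DRM_DEBUG_KMS("failed to %s MST mode\n",
			      sink_can_mst ? "enable" : "disable");
}
#endif
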
/**
 * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
 * @mgr: manager to suspend
 *
 * This function tells the MST device that we can't handle UP messages
 * anymore. This should stop it from sending any since we are suspended.
 */
void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
{
	mutex_lock(&mgr->lock);
	drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
			   DP_MST_EN | DP_UPSTREAM_IS_SRC);
	mutex_unlock(&mgr->lock);
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);

/**
 * drm_dp_mst_topology_mgr_resume() - resume the MST manager
 * @mgr: manager to resume
 *
 * This will fetch DPCD and see if the device is still there,
 * if it is, it will rewrite the MSTM control bits, and return.
 *
 * If the device fails this returns -1, and the driver should do
 * a full MST reprobe, in case we were undocked.
 */
int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
{
	int ret = 0;

	mutex_lock(&mgr->lock);

	if (mgr->mst_primary) {
		int sret;
		u8 guid[16];

		sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
		if (sret != DP_RECEIVER_CAP_SIZE) {
			DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
			ret = -1;
			goto out_unlock;
		}

		ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
					 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
		if (ret < 0) {
			DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n");
			ret = -1;
			goto out_unlock;
		}

		/* Some hubs forget their guids after they resume */
		sret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
		if (sret != 16) {
			DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
			ret = -1;
			goto out_unlock;
		}
		drm_dp_check_mstb_guid(mgr->mst_primary, guid);

		ret = 0;
	} else
		ret = -1;

out_unlock:
	mutex_unlock(&mgr->lock);
	return ret;
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);

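/*
 * Sketch of the suspend/resume pairing (example only): a negative
 * return from resume means the topology must be treated as stale.
 */
#if 0 /* example only */
static void example_resume(struct drm_dp_mst_topology_mgr *mgr)
{
	if (drm_dp_mst_topology_mgr_resume(mgr) < 0) {
		/* undocked or hub swapped while asleep - drop the old
		 * topology and let a fresh detect cycle reprobe it */
		drm_dp_mst_topology_mgr_set_mst(mgr, false);
	}
}
#endif
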
static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
{
	int len;
	u8 replyblock[32];
	int replylen, origlen, curreply;
	int ret;
	struct drm_dp_sideband_msg_rx *msg;
	int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE : DP_SIDEBAND_MSG_DOWN_REP_BASE;
	msg = up ? &mgr->up_req_recv : &mgr->down_rep_recv;

	len = min(mgr->max_dpcd_transaction_bytes, 16);
	ret = drm_dp_dpcd_read(mgr->aux, basereg,
			       replyblock, len);
	if (ret != len) {
		DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret);
		return;
	}
	ret = drm_dp_sideband_msg_build(msg, replyblock, len, true);
	if (!ret) {
		DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
		return;
	}
	replylen = msg->curchunk_len + msg->curchunk_hdrlen;

	origlen = replylen;
	replylen -= len;
	curreply = len;
	while (replylen > 0) {
		len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
		ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
				    replyblock, len);
		if (ret != len) {
			DRM_DEBUG_KMS("failed to read a chunk\n");
		}
		ret = drm_dp_sideband_msg_build(msg, replyblock, len, false);
		if (ret == false)
			DRM_DEBUG_KMS("failed to build sideband msg\n");
		curreply += len;
		replylen -= len;
	}
}

static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
{
	int ret = 0;

	drm_dp_get_one_sb_msg(mgr, false);

	if (mgr->down_rep_recv.have_eomt) {
		struct drm_dp_sideband_msg_tx *txmsg;
		struct drm_dp_mst_branch *mstb;
		int slot = -1;
		mstb = drm_dp_get_mst_branch_device(mgr,
						    mgr->down_rep_recv.initial_hdr.lct,
						    mgr->down_rep_recv.initial_hdr.rad);

		if (!mstb) {
			DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->down_rep_recv.initial_hdr.lct);
			memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
			return 0;
		}

		/* find the message */
		slot = mgr->down_rep_recv.initial_hdr.seqno;
		mutex_lock(&mgr->qlock);
		txmsg = mstb->tx_slots[slot];
		/* remove from slots */
		mutex_unlock(&mgr->qlock);

		if (!txmsg) {
			DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n",
			       mstb,
			       mgr->down_rep_recv.initial_hdr.seqno,
			       mgr->down_rep_recv.initial_hdr.lct,
				      mgr->down_rep_recv.initial_hdr.rad[0],
				      mgr->down_rep_recv.msg[0]);
			drm_dp_put_mst_branch_device(mstb);
			memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
			return 0;
		}

		drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply);
		if (txmsg->reply.reply_type == 1) {
			DRM_DEBUG_KMS("Got NAK reply: req 0x%02x, reason 0x%02x, nak data 0x%02x\n", txmsg->reply.req_type, txmsg->reply.u.nak.reason, txmsg->reply.u.nak.nak_data);
		}

		memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
		drm_dp_put_mst_branch_device(mstb);

		mutex_lock(&mgr->qlock);
		txmsg->state = DRM_DP_SIDEBAND_TX_RX;
		mstb->tx_slots[slot] = NULL;
		mutex_unlock(&mgr->qlock);

//       wake_up(&mgr->tx_waitq);
	}
	return ret;
}

static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
{
	int ret = 0;
	drm_dp_get_one_sb_msg(mgr, true);

	if (mgr->up_req_recv.have_eomt) {
		struct drm_dp_sideband_msg_req_body msg;
		struct drm_dp_mst_branch *mstb = NULL;
		bool seqno;

		if (!mgr->up_req_recv.initial_hdr.broadcast) {
			mstb = drm_dp_get_mst_branch_device(mgr,
							    mgr->up_req_recv.initial_hdr.lct,
							    mgr->up_req_recv.initial_hdr.rad);
			if (!mstb) {
				DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
				memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
				return 0;
			}
		}

		seqno = mgr->up_req_recv.initial_hdr.seqno;
		drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg);

		if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
			drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);

			if (!mstb)
				mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.conn_stat.guid);

			if (!mstb) {
				DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
				memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
				return 0;
			}

			drm_dp_update_port(mstb, &msg.u.conn_stat);

			DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
			(*mgr->cbs->hotplug)(mgr);

		} else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
			drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
			if (!mstb)
				mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.resource_stat.guid);

			if (!mstb) {
				DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
				memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
				return 0;
			}

			DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn);
		}

		drm_dp_put_mst_branch_device(mstb);
		memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
	}
	return ret;
}

/**
 * drm_dp_mst_hpd_irq() - MST hotplug IRQ notify
 * @mgr: manager to notify irq for.
 * @esi: 4 bytes from SINK_COUNT_ESI
 * @handled: whether the hpd interrupt was consumed or not
 *
 * This should be called from the driver when it detects a short IRQ,
 * along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The
 * topology manager will process the sideband messages received as a result
 * of this.
 */
int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled)
{
	int ret = 0;
	int sc;
	*handled = false;
	sc = esi[0] & 0x3f;

	if (sc != mgr->sink_count) {
		mgr->sink_count = sc;
		*handled = true;
	}

	if (esi[1] & DP_DOWN_REP_MSG_RDY) {
		ret = drm_dp_mst_handle_down_rep(mgr);
		*handled = true;
	}

	if (esi[1] & DP_UP_REQ_MSG_RDY) {
		ret |= drm_dp_mst_handle_up_req(mgr);
		*handled = true;
	}

	drm_dp_mst_kick_tx(mgr);
	return ret;
}
EXPORT_SYMBOL(drm_dp_mst_hpd_irq);

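/*
 * Sketch of a short-pulse handler feeding ESI bytes to this helper
 * (example only). The 14-byte DP_SINK_COUNT_ESI read and the 3-byte
 * event ack mirror common driver practice, but the retry policy is the
 * driver's own choice.
 */
#if 0 /* example only */
static void example_short_pulse(struct drm_dp_mst_topology_mgr *mgr)
{
	u8 esi[16];
	bool handled;

	if (drm_dp_dpcd_read(mgr->aux, DP_SINK_COUNT_ESI, esi, 14) != 14)
		return;

	drm_dp_mst_hpd_irq(mgr, esi, &handled);
	if (handled)
		/* ack the event bits we consumed */
		drm_dp_dpcd_write(mgr->aux, DP_SINK_COUNT_ESI + 1, &esi[1], 3);
}
#endif
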
/**
 * drm_dp_mst_detect_port() - get connection status for an MST port
 * @mgr: manager for this port
 * @port: unverified pointer to a port
 *
 * This returns the current connection state for a port. It validates the
 * port pointer still exists so the caller doesn't require a reference.
 */
enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector,
						 struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
{
	enum drm_connector_status status = connector_status_disconnected;

	/* we need to search for the port in the mgr in case it's gone */
	port = drm_dp_get_validated_port_ref(mgr, port);
	if (!port)
		return connector_status_disconnected;

	if (!port->ddps)
		goto out;

	switch (port->pdt) {
	case DP_PEER_DEVICE_NONE:
	case DP_PEER_DEVICE_MST_BRANCHING:
		break;

	case DP_PEER_DEVICE_SST_SINK:
		status = connector_status_connected;
		/* for logical ports - cache the EDID */
		if (port->port_num >= 8 && !port->cached_edid) {
			port->cached_edid = drm_get_edid(connector, &port->aux.ddc);
		}
		break;
	case DP_PEER_DEVICE_DP_LEGACY_CONV:
		if (port->ldps)
			status = connector_status_connected;
		break;
	}
out:
	drm_dp_put_port(port);
	return status;
}
EXPORT_SYMBOL(drm_dp_mst_detect_port);

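/*
 * Sketch of a connector ->detect() hook built on the helper above
 * (example only; real drivers wrap the port pointer in their own
 * connector structure).
 */
#if 0 /* example only */
static enum drm_connector_status
example_detect(struct drm_connector *connector,
	       struct drm_dp_mst_topology_mgr *mgr,
	       struct drm_dp_mst_port *port)
{
	/* the helper revalidates the unverified port pointer itself */
	return drm_dp_mst_detect_port(connector, mgr, port);
}
#endif
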
/**
 * drm_dp_mst_port_has_audio() - Check whether port has audio capability or not
 * @mgr: manager for this port
 * @port: unverified pointer to a port.
 *
 * This returns whether the port supports audio or not.
 */
bool drm_dp_mst_port_has_audio(struct drm_dp_mst_topology_mgr *mgr,
					struct drm_dp_mst_port *port)
{
	bool ret = false;

	port = drm_dp_get_validated_port_ref(mgr, port);
	if (!port)
		return ret;
	ret = port->has_audio;
	drm_dp_put_port(port);
	return ret;
}
EXPORT_SYMBOL(drm_dp_mst_port_has_audio);

/**
 * drm_dp_mst_get_edid() - get EDID for an MST port
 * @connector: toplevel connector to get EDID for
 * @mgr: manager for this port
 * @port: unverified pointer to a port.
 *
 * This returns an EDID for the port connected to a connector.
 * It validates the pointer still exists so the caller doesn't require a
 * reference.
 */
struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
{
	struct edid *edid = NULL;

	/* we need to search for the port in the mgr in case it's gone */
	port = drm_dp_get_validated_port_ref(mgr, port);
	if (!port)
		return NULL;

	if (port->cached_edid)
		edid = drm_edid_duplicate(port->cached_edid);
	else {
		edid = drm_get_edid(connector, &port->aux.ddc);
		drm_mode_connector_set_tile_property(connector);
	}
	port->has_audio = drm_detect_monitor_audio(edid);
	drm_dp_put_port(port);
	return edid;
}
EXPORT_SYMBOL(drm_dp_mst_get_edid);

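/*
 * Sketch of a ->get_modes() style consumer (example only): fetch the
 * EDID, hand it to the core mode parser, free the copy. Audio
 * capability can then be queried via drm_dp_mst_port_has_audio(),
 * since drm_dp_mst_get_edid() caches it on the port.
 */
#if 0 /* example only */
static int example_get_modes(struct drm_connector *connector,
			     struct drm_dp_mst_topology_mgr *mgr,
			     struct drm_dp_mst_port *port)
{
	struct edid *edid;
	int count;

	edid = drm_dp_mst_get_edid(connector, mgr, port);
	if (!edid)
		return 0;

	count = drm_add_edid_modes(connector, edid);
	kfree(edid);
	return count;
}
#endif
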
/**
 * drm_dp_find_vcpi_slots() - find slots for this PBN value
 * @mgr: manager to use
 * @pbn: payload bandwidth to convert into slots.
 */
int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
			   int pbn)
{
	int num_slots;

	num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);

	if (num_slots > mgr->avail_slots)
		return -ENOSPC;
	return num_slots;
}
EXPORT_SYMBOL(drm_dp_find_vcpi_slots);

static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
			    struct drm_dp_vcpi *vcpi, int pbn)
{
	int num_slots;
	int ret;

	num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);

	if (num_slots > mgr->avail_slots)
		return -ENOSPC;

	vcpi->pbn = pbn;
	vcpi->aligned_pbn = num_slots * mgr->pbn_div;
	vcpi->num_slots = num_slots;

	ret = drm_dp_mst_assign_payload_id(mgr, vcpi);
	if (ret < 0)
		return ret;
	return 0;
}

/**
 * drm_dp_mst_allocate_vcpi() - Allocate a virtual channel
 * @mgr: manager for this port
 * @port: port to allocate a virtual channel for.
 * @pbn: payload bandwidth number to request
 * @slots: returned number of slots for this PBN.
 */
bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, int pbn, int *slots)
{
	int ret;

	port = drm_dp_get_validated_port_ref(mgr, port);
	if (!port)
		return false;

	if (port->vcpi.vcpi > 0) {
		DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", port->vcpi.vcpi, port->vcpi.pbn, pbn);
		if (pbn == port->vcpi.pbn) {
			*slots = port->vcpi.num_slots;
			drm_dp_put_port(port);
			return true;
		}
	}

	ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn);
	if (ret) {
		DRM_DEBUG_KMS("failed to init vcpi %d %d %d\n", DIV_ROUND_UP(pbn, mgr->pbn_div), mgr->avail_slots, ret);
		goto out;
	}
	DRM_DEBUG_KMS("initing vcpi for %d %d\n", pbn, port->vcpi.num_slots);
	*slots = port->vcpi.num_slots;

	drm_dp_put_port(port);
	return true;
out:
	return false;
}
EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi);

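/*
 * Sketch of the allocation step during a mode set (example only; the
 * clock and bpp values are placeholders). The PBN is derived from the
 * mode, then a VCPI sized to match is requested for the port.
 */
#if 0 /* example only */
static bool example_allocate(struct drm_dp_mst_topology_mgr *mgr,
			     struct drm_dp_mst_port *port)
{
	int pbn, slots;

	pbn = drm_dp_calc_pbn_mode(154000, 24); /* placeholder mode */
	if (!drm_dp_mst_allocate_vcpi(mgr, port, pbn, &slots))
		return false;

	DRM_DEBUG_KMS("%d PBN -> %d slots\n", pbn, slots);
	return true;
}
#endif
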
int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
{
	int slots = 0;
	port = drm_dp_get_validated_port_ref(mgr, port);
	if (!port)
		return slots;

	slots = port->vcpi.num_slots;
	drm_dp_put_port(port);
	return slots;
}
EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots);

/**
 * drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI
 * @mgr: manager for this port
 * @port: unverified pointer to a port.
 *
 * This just resets the number of slots for the port's VCPI for later programming.
 */
void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
{
	port = drm_dp_get_validated_port_ref(mgr, port);
	if (!port)
		return;
	port->vcpi.num_slots = 0;
	drm_dp_put_port(port);
}
EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots);

/**
 * drm_dp_mst_deallocate_vcpi() - deallocate a VCPI
 * @mgr: manager for this port
 * @port: unverified port to deallocate vcpi for
 */
void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
{
	port = drm_dp_get_validated_port_ref(mgr, port);
	if (!port)
		return;

	drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
	port->vcpi.num_slots = 0;
	port->vcpi.pbn = 0;
	port->vcpi.aligned_pbn = 0;
	port->vcpi.vcpi = 0;
	drm_dp_put_port(port);
}
EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi);

static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
				     int id, struct drm_dp_payload *payload)
{
	u8 payload_alloc[3], status;
	int ret;
	int retries = 0;

	drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
			   DP_PAYLOAD_TABLE_UPDATED);

	payload_alloc[0] = id;
	payload_alloc[1] = payload->start_slot;
	payload_alloc[2] = payload->num_slots;

	ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
	if (ret != 3) {
		DRM_DEBUG_KMS("failed to write payload allocation %d\n", ret);
		goto fail;
	}

retry:
	ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
	if (ret < 0) {
		DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
		goto fail;
	}

	if (!(status & DP_PAYLOAD_TABLE_UPDATED)) {
		retries++;
		if (retries < 20) {
			usleep_range(10000, 20000);
			goto retry;
		}
		DRM_DEBUG_KMS("status not set after read payload table status %d\n", status);
		ret = -EINVAL;
		goto fail;
	}
	ret = 0;
fail:
	return ret;
}


/**
 * drm_dp_check_act_status() - Check ACT handled status.
 * @mgr: manager to use
 *
 * Check the payload status bits in the DPCD for ACT handled completion.
 */
int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
{
	u8 status;
	int ret;
	int count = 0;

	do {
		ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);

		if (ret < 0) {
			DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
			goto fail;
		}

		if (status & DP_PAYLOAD_ACT_HANDLED)
			break;
		count++;
		udelay(100);

	} while (count < 30);

	if (!(status & DP_PAYLOAD_ACT_HANDLED)) {
		DRM_DEBUG_KMS("failed to get ACT bit %d after %d retries\n", status, count);
		ret = -EINVAL;
		goto fail;
	}
	return 0;
fail:
	return ret;
}
EXPORT_SYMBOL(drm_dp_check_act_status);

/**
2679
/**
2653
 * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
2680
 * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
2654
 * @clock: dot clock for the mode
2681
 * @clock: dot clock for the mode
2655
 * @bpp: bpp for the mode.
2682
 * @bpp: bpp for the mode.
2656
 *
2683
 *
2657
 * This uses the formula in the spec to calculate the PBN value for a mode.
2684
 * This uses the formula in the spec to calculate the PBN value for a mode.
2658
 */
2685
 */
2659
int drm_dp_calc_pbn_mode(int clock, int bpp)
2686
int drm_dp_calc_pbn_mode(int clock, int bpp)
2660
{
2687
{
2661
	u64 kbps;
2688
	u64 kbps;
2662
	s64 peak_kbps;
2689
	s64 peak_kbps;
2663
	u32 numerator;
2690
	u32 numerator;
2664
	u32 denominator;
2691
	u32 denominator;
2665
 
2692
 
2666
	kbps = clock * bpp;
2693
	kbps = clock * bpp;
2667
 
2694
 
2668
	/*
2695
	/*
2669
	 * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
2696
	 * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
2670
	 * The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on
2697
	 * The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on
2671
	 * common multiplier to render an integer PBN for all link rate/lane
2698
	 * common multiplier to render an integer PBN for all link rate/lane
2672
	 * counts combinations
2699
	 * counts combinations
2673
	 * calculate
2700
	 * calculate
2674
	 * peak_kbps *= (1006/1000)
2701
	 * peak_kbps *= (1006/1000)
2675
	 * peak_kbps *= (64/54)
2702
	 * peak_kbps *= (64/54)
2676
	 * peak_kbps *= 8    convert to bytes
2703
	 * peak_kbps *= 8    convert to bytes
2677
	 */
2704
	 */
2678
 
2705
 
2679
	numerator = 64 * 1006;
2706
	numerator = 64 * 1006;
2680
	denominator = 54 * 8 * 1000 * 1000;
2707
	denominator = 54 * 8 * 1000 * 1000;
2681
 
2708
 
2682
	kbps *= numerator;
2709
	kbps *= numerator;
2683
	peak_kbps = drm_fixp_from_fraction(kbps, denominator);
2710
	peak_kbps = drm_fixp_from_fraction(kbps, denominator);
2684
 
2711
 
2685
	return drm_fixp2int_ceil(peak_kbps);
2712
	return drm_fixp2int_ceil(peak_kbps);
2686
}
2713
}
2687
EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
2714
EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
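
/*
 * Worked example of the formula above: for a 154 MHz dot clock at 30 bpp,
 * kbps = 154000 * 30 = 4620000, so
 * PBN = ceil(4620000 * (64 * 1006) / (54 * 8 * 1000 * 1000))
 *     = ceil(688.55...) = 689,
 * which matches the first self-test case below.
 */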

static int test_calc_pbn_mode(void)
{
	int ret;
	ret = drm_dp_calc_pbn_mode(154000, 30);
	if (ret != 689) {
		DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
				154000, 30, 689, ret);
		return -EINVAL;
	}
	ret = drm_dp_calc_pbn_mode(234000, 30);
	if (ret != 1047) {
		DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
				234000, 30, 1047, ret);
		return -EINVAL;
	}
	ret = drm_dp_calc_pbn_mode(297000, 24);
	if (ret != 1063) {
		DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
				297000, 24, 1063, ret);
		return -EINVAL;
	}
	return 0;
}

/* we want to kick the TX after we've acked the up/down IRQs. */
static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr)
{
//   queue_work(system_long_wq, &mgr->tx_work);
}

static void drm_dp_mst_dump_mstb(struct seq_file *m,
				 struct drm_dp_mst_branch *mstb)
{
	struct drm_dp_mst_port *port;
	int tabs = mstb->lct;
	char prefix[10];
	int i;

	for (i = 0; i < tabs; i++)
		prefix[i] = '\t';
	prefix[i] = '\0';

//   seq_printf(m, "%smst: %p, %d\n", prefix, mstb, mstb->num_ports);
//   list_for_each_entry(port, &mstb->ports, next) {
//       seq_printf(m, "%sport: %d: ddps: %d ldps: %d, %p, conn: %p\n", prefix, port->port_num, port->ddps, port->ldps, port, port->connector);
//       if (port->mstb)
//           drm_dp_mst_dump_mstb(m, port->mstb);
//   }
}

static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
				  char *buf)
{
	int ret;
	int i;
	for (i = 0; i < 4; i++) {
		ret = drm_dp_dpcd_read(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS + (i * 16), &buf[i * 16], 16);
		if (ret != 16)
			break;
	}
	if (i == 4)
		return true;
	return false;
}

/**
 * drm_dp_mst_dump_topology(): dump topology to seq file.
 * @m: seq_file to dump output to
 * @mgr: manager to dump current topology for.
 *
 * helper to dump MST topology to a seq file for debugfs.
 */
void drm_dp_mst_dump_topology(struct seq_file *m,
			      struct drm_dp_mst_topology_mgr *mgr)
{
	int i;
	struct drm_dp_mst_port *port;
	mutex_lock(&mgr->lock);
	if (mgr->mst_primary)
		drm_dp_mst_dump_mstb(m, mgr->mst_primary);

	/* dump VCPIs */
	mutex_unlock(&mgr->lock);
}
EXPORT_SYMBOL(drm_dp_mst_dump_topology);
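
/*
 * Minimal sketch (hypothetical wiring, not part of this file): a driver can
 * expose the dump through a debugfs seq_file show callback:
 *
 *	static int mst_topology_show(struct seq_file *m, void *data)
 *	{
 *		struct drm_dp_mst_topology_mgr *mgr = m->private;
 *
 *		drm_dp_mst_dump_topology(m, mgr);
 *		return 0;
 *	}
 */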

static void drm_dp_tx_work(struct work_struct *work)
{
	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);

	mutex_lock(&mgr->qlock);
	if (mgr->tx_down_in_progress)
		process_single_down_tx_qlock(mgr);
	mutex_unlock(&mgr->qlock);
}

static void drm_dp_free_mst_port(struct kref *kref)
{
	struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
	kref_put(&port->parent->kref, drm_dp_free_mst_branch_device);
	kfree(port);
}
/**
 * drm_dp_mst_topology_mgr_init - initialise a topology manager
 * @mgr: manager struct to initialise
 * @dev: device providing this structure - for i2c addition.
 * @aux: DP helper aux channel to talk to this device
 * @max_dpcd_transaction_bytes: hw specific DPCD transaction limit
 * @max_payloads: maximum number of payloads this GPU can source
 * @conn_base_id: the connector object ID the MST device is connected to.
 *
 * Return 0 for success, or negative error code on failure
 */
int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
				 struct device *dev, struct drm_dp_aux *aux,
				 int max_dpcd_transaction_bytes,
				 int max_payloads, int conn_base_id)
{
	mutex_init(&mgr->lock);
	mutex_init(&mgr->qlock);
	mutex_init(&mgr->payload_lock);
	mutex_init(&mgr->destroy_connector_lock);
	INIT_LIST_HEAD(&mgr->tx_msg_downq);
	INIT_LIST_HEAD(&mgr->destroy_connector_list);
	INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
	INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
	init_waitqueue_head(&mgr->tx_waitq);
	mgr->dev = dev;
	mgr->aux = aux;
	mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
	mgr->max_payloads = max_payloads;
	mgr->conn_base_id = conn_base_id;
	if (max_payloads + 1 > sizeof(mgr->payload_mask) * 8 ||
	    max_payloads + 1 > sizeof(mgr->vcpi_mask) * 8)
		return -EINVAL;
	mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL);
	if (!mgr->payloads)
		return -ENOMEM;
	mgr->proposed_vcpis = kcalloc(max_payloads, sizeof(struct drm_dp_vcpi *), GFP_KERNEL);
	if (!mgr->proposed_vcpis)
		return -ENOMEM;
	set_bit(0, &mgr->payload_mask);
	if (test_calc_pbn_mode() < 0)
		DRM_ERROR("MST PBN self-test failed\n");

	return 0;
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
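
/*
 * Usage sketch (names and values are hypothetical): a driver initialises the
 * manager once per MST-capable connector, e.g.
 *
 *	ret = drm_dp_mst_topology_mgr_init(&my_dp->mst_mgr, dev->dev,
 *					   &my_dp->aux, 16, 3,
 *					   connector->base.id);
 *
 * where 16 would be the hardware's DPCD transaction limit in bytes and 3 the
 * number of payload streams the source can drive.
 */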

/**
 * drm_dp_mst_topology_mgr_destroy() - destroy topology manager.
 * @mgr: manager to destroy
 */
void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
{
	mutex_lock(&mgr->payload_lock);
	kfree(mgr->payloads);
	mgr->payloads = NULL;
	kfree(mgr->proposed_vcpis);
	mgr->proposed_vcpis = NULL;
	mutex_unlock(&mgr->payload_lock);
	mgr->dev = NULL;
	mgr->aux = NULL;
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);

/* I2C device */
static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
			       int num)
{
	struct drm_dp_aux *aux = adapter->algo_data;
	struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port, aux);
	struct drm_dp_mst_branch *mstb;
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
	unsigned int i;
	bool reading = false;
	struct drm_dp_sideband_msg_req_body msg;
	struct drm_dp_sideband_msg_tx *txmsg = NULL;
	int ret;

	mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
	if (!mstb)
		return -EREMOTEIO;

	/* construct i2c msg */
	/* see if last msg is a read */
	if (msgs[num - 1].flags & I2C_M_RD)
		reading = true;

	if (!reading || (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)) {
		DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n");
		ret = -EIO;
		goto out;
	}

	memset(&msg, 0, sizeof(msg));
	msg.req_type = DP_REMOTE_I2C_READ;
	msg.u.i2c_read.num_transactions = num - 1;
	msg.u.i2c_read.port_number = port->port_num;
	for (i = 0; i < num - 1; i++) {
		msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr;
		msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len;
		msg.u.i2c_read.transactions[i].bytes = msgs[i].buf;
	}
	msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr;
	msg.u.i2c_read.num_bytes_read = msgs[num - 1].len;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg) {
		ret = -ENOMEM;
		goto out;
	}

	txmsg->dst = mstb;
	drm_dp_encode_sideband_req(&msg, txmsg);

	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {

		if (txmsg->reply.reply_type == 1) { /* got a NAK back */
			ret = -EREMOTEIO;
			goto out;
		}
		if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) {
			ret = -EIO;
			goto out;
		}
		memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes, msgs[num - 1].len);
		ret = num;
	}
out:
	kfree(txmsg);
	drm_dp_put_mst_branch_device(mstb);
	return ret;
}
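
/*
 * Example transfer (sketch): an EDID read over a port's remote i2c adapter
 * is a single write transaction followed by a read, which the function above
 * encodes as one DP_REMOTE_I2C_READ sideband request:
 *
 *	u8 offset = 0;
 *	u8 edid[128];
 *	struct i2c_msg msgs[] = {
 *		{ .addr = 0x50, .flags = 0, .len = 1, .buf = &offset },
 *		{ .addr = 0x50, .flags = I2C_M_RD, .len = sizeof(edid), .buf = edid },
 *	};
 *
 *	ret = i2c_transfer(&port->aux.ddc, msgs, 2);
 */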

static u32 drm_dp_mst_i2c_functionality(struct i2c_adapter *adapter)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
	       I2C_FUNC_SMBUS_READ_BLOCK_DATA |
	       I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
	       I2C_FUNC_10BIT_ADDR;
}

static const struct i2c_algorithm drm_dp_mst_i2c_algo = {
	.functionality = drm_dp_mst_i2c_functionality,
	.master_xfer = drm_dp_mst_i2c_xfer,
};

/**
 * drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX
 * @aux: DisplayPort AUX channel
 *
 * Returns 0 on success or a negative error code on failure.
 */
static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux)
{
	aux->ddc.algo = &drm_dp_mst_i2c_algo;
	aux->ddc.algo_data = aux;
	aux->ddc.retries = 3;

	aux->ddc.class = I2C_CLASS_DDC;
	aux->ddc.owner = THIS_MODULE;
	aux->ddc.dev.parent = aux->dev;

	return i2c_add_adapter(&aux->ddc);
}
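
/*
 * Note: registration is expected to happen as ports are added to the
 * topology, so each downstream device gets its own i2c adapter for EDID
 * reads over the sideband channel; the matching
 * drm_dp_mst_unregister_i2c_bus() below tears it down again.
 */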

/**
 * drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter
 * @aux: DisplayPort AUX channel
 */
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux)
{
	i2c_del_adapter(&aux->ddc);
}