Subversion Repositories Kolibri OS

Rev 6320 → Rev 6660
/*
 * Copyright © 2014 Red Hat
 *
 * Permission to use, copy, modify, distribute, and sell this software and its
 * documentation for any purpose is hereby granted without fee, provided that
 * the above copyright notice appear in all copies and that both that copyright
 * notice and this permission notice appear in supporting documentation, and
 * that the name of the copyright holders not be used in advertising or
 * publicity pertaining to distribution of the software without specific,
 * written prior permission.  The copyright holders make no representations
 * about the suitability of this software for any purpose.  It is provided "as
 * is" without express or implied warranty.
 *
 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
 * OF THIS SOFTWARE.
 */

#include 
#include 
#include 
#include 
#include 
#include 
#include 
#include 
#include 
#include 
#include 
#include 
#include 

#include 

u64 get_jiffies_64(void)
{
    return jiffies;
}
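
/*
 * Note: get_jiffies_64() above looks like a KolibriOS compatibility shim:
 * it simply widens the global jiffies tick counter, standing in for the
 * kernel helper of the same name that the code below expects.
 */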
/**
 * DOC: dp mst helper
 *
 * These functions contain parts of the DisplayPort 1.2a MultiStream Transport
 * protocol. The helpers contain a topology manager and bandwidth manager.
 * The helpers encapsulate the sending and receiving of sideband msgs.
 */
static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
				  char *buf);
static int test_calc_pbn_mode(void);

static void drm_dp_put_port(struct drm_dp_mst_port *port);

static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
				     int id,
				     struct drm_dp_payload *payload);

static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
				  struct drm_dp_mst_port *port,
				  int offset, int size, u8 *bytes);

static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
				     struct drm_dp_mst_branch *mstb);
static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
					   struct drm_dp_mst_branch *mstb,
					   struct drm_dp_mst_port *port);
static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
				 u8 *guid);

static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux);
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux);
static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);

/* sideband msg handling */
static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
{
	u8 bitmask = 0x80;
	u8 bitshift = 7;
	u8 array_index = 0;
	int number_of_bits = num_nibbles * 4;
	u8 remainder = 0;

	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		remainder |= (data[array_index] & bitmask) >> bitshift;
		bitmask >>= 1;
		bitshift--;
		if (bitmask == 0) {
			bitmask = 0x80;
			bitshift = 7;
			array_index++;
		}
		if ((remainder & 0x10) == 0x10)
			remainder ^= 0x13;
	}

	number_of_bits = 4;
	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		if ((remainder & 0x10) != 0)
			remainder ^= 0x13;
	}

	return remainder;
}
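
/*
 * A sketch, not part of the driver: the loop above is a plain bitwise
 * CRC-4 with generator polynomial x^4 + x + 1 (0x13), fed MSB-first and
 * then augmented with four zero bits. An equivalent nibble-at-a-time
 * formulation, kept here as a comment, would be:
 *
 *	static u8 crc4_push_nibble(u8 rem, u8 nibble)
 *	{
 *		int bit;
 *
 *		for (bit = 3; bit >= 0; bit--) {
 *			rem = (rem << 1) | ((nibble >> bit) & 1);
 *			if (rem & 0x10)
 *				rem ^= 0x13;
 *		}
 *		return rem;
 *	}
 *
 * Pushing every header nibble and then one zero nibble leaves the CRC in
 * the low four bits of rem, matching drm_dp_msg_header_crc4() above.
 */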

static u8 drm_dp_msg_data_crc4(const uint8_t *data, u8 number_of_bytes)
{
	u8 bitmask = 0x80;
	u8 bitshift = 7;
	u8 array_index = 0;
	int number_of_bits = number_of_bytes * 8;
	u16 remainder = 0;

	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		remainder |= (data[array_index] & bitmask) >> bitshift;
		bitmask >>= 1;
		bitshift--;
		if (bitmask == 0) {
			bitmask = 0x80;
			bitshift = 7;
			array_index++;
		}
		if ((remainder & 0x100) == 0x100)
			remainder ^= 0xd5;
	}

	number_of_bits = 8;
	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		if ((remainder & 0x100) != 0)
			remainder ^= 0xd5;
	}

	return remainder & 0xff;
}
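
/*
 * The data CRC above is the same construction over whole bytes: generator
 * 0xd5 (x^8 + x^7 + x^6 + x^4 + x^2 + 1), MSB-first, augmented with eight
 * zero bits. drm_dp_crc_sideband_chunk_req() below appends it as the last
 * byte of each sideband message chunk.
 */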
static inline u8 drm_dp_calc_sb_hdr_size(struct drm_dp_sideband_msg_hdr *hdr)
{
	u8 size = 3;
	size += (hdr->lct / 2);
	return size;
}
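
/*
 * The three fixed bytes are the LCT/LCR byte and the two trailing control
 * bytes; each pair of hops beyond the root adds one RAD byte. For example,
 * lct == 1 (a device on the root branch) gives a 3-byte header, while
 * lct == 3 gives 4 bytes. See the layout sketch after the encoder below.
 */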

static void drm_dp_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
					   u8 *buf, int *len)
{
	int idx = 0;
	int i;
	u8 crc4;
	buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf);
	for (i = 0; i < (hdr->lct / 2); i++)
		buf[idx++] = hdr->rad[i];
	buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) |
		(hdr->msg_len & 0x3f);
	buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4);

	crc4 = drm_dp_msg_header_crc4(buf, (idx * 2) - 1);
	buf[idx - 1] |= (crc4 & 0xf);

	*len = idx;
}
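
/*
 * Resulting on-the-wire header layout (as encoded above and decoded below):
 *
 *	byte 0          : LCT[7:4] | LCR[3:0]
 *	bytes 1..lct/2  : RAD (two 4-bit hop numbers per byte)
 *	next byte       : Broadcast[7] | Path_Message[6] | Body_Length[5:0]
 *	final byte      : SOMT[7] | EOMT[6] | Seqno[4] | CRC4[3:0]
 *
 * The CRC covers (2 * header_size) - 1 nibbles, i.e. everything except the
 * low nibble of the final byte, which is where the CRC itself lands.
 */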

static bool drm_dp_decode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
					   u8 *buf, int buflen, u8 *hdrlen)
{
	u8 crc4;
	u8 len;
	int i;
	u8 idx;
	if (buf[0] == 0)
		return false;
	len = 3;
	len += ((buf[0] & 0xf0) >> 4) / 2;
	if (len > buflen)
		return false;
	crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1);

	if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) {
		DRM_DEBUG_KMS("crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]);
		return false;
	}

	hdr->lct = (buf[0] & 0xf0) >> 4;
	hdr->lcr = (buf[0] & 0xf);
	idx = 1;
	for (i = 0; i < (hdr->lct / 2); i++)
		hdr->rad[i] = buf[idx++];
	hdr->broadcast = (buf[idx] >> 7) & 0x1;
	hdr->path_msg = (buf[idx] >> 6) & 0x1;
	hdr->msg_len = buf[idx] & 0x3f;
	idx++;
	hdr->somt = (buf[idx] >> 7) & 0x1;
	hdr->eomt = (buf[idx] >> 6) & 0x1;
	hdr->seqno = (buf[idx] >> 4) & 0x1;
	idx++;
	*hdrlen = idx;
	return true;
}

static void drm_dp_encode_sideband_req(struct drm_dp_sideband_msg_req_body *req,
				       struct drm_dp_sideband_msg_tx *raw)
{
	int idx = 0;
	int i;
	u8 *buf = raw->msg;
	buf[idx++] = req->req_type & 0x7f;

	switch (req->req_type) {
	case DP_ENUM_PATH_RESOURCES:
		buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
		idx++;
		break;
	case DP_ALLOCATE_PAYLOAD:
		buf[idx] = (req->u.allocate_payload.port_number & 0xf) << 4 |
			(req->u.allocate_payload.number_sdp_streams & 0xf);
		idx++;
		buf[idx] = (req->u.allocate_payload.vcpi & 0x7f);
		idx++;
		buf[idx] = (req->u.allocate_payload.pbn >> 8);
		idx++;
		buf[idx] = (req->u.allocate_payload.pbn & 0xff);
		idx++;
		for (i = 0; i < req->u.allocate_payload.number_sdp_streams / 2; i++) {
			buf[idx] = ((req->u.allocate_payload.sdp_stream_sink[i * 2] & 0xf) << 4) |
				(req->u.allocate_payload.sdp_stream_sink[i * 2 + 1] & 0xf);
			idx++;
		}
		if (req->u.allocate_payload.number_sdp_streams & 1) {
			i = req->u.allocate_payload.number_sdp_streams - 1;
			buf[idx] = (req->u.allocate_payload.sdp_stream_sink[i] & 0xf) << 4;
			idx++;
		}
		break;
	case DP_QUERY_PAYLOAD:
		buf[idx] = (req->u.query_payload.port_number & 0xf) << 4;
		idx++;
		buf[idx] = (req->u.query_payload.vcpi & 0x7f);
		idx++;
		break;
	case DP_REMOTE_DPCD_READ:
		buf[idx] = (req->u.dpcd_read.port_number & 0xf) << 4;
		buf[idx] |= ((req->u.dpcd_read.dpcd_address & 0xf0000) >> 16) & 0xf;
		idx++;
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff00) >> 8;
		idx++;
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff);
		idx++;
		buf[idx] = (req->u.dpcd_read.num_bytes);
		idx++;
		break;

	case DP_REMOTE_DPCD_WRITE:
		buf[idx] = (req->u.dpcd_write.port_number & 0xf) << 4;
		buf[idx] |= ((req->u.dpcd_write.dpcd_address & 0xf0000) >> 16) & 0xf;
		idx++;
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff00) >> 8;
		idx++;
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff);
		idx++;
		buf[idx] = (req->u.dpcd_write.num_bytes);
		idx++;
		memcpy(&buf[idx], req->u.dpcd_write.bytes, req->u.dpcd_write.num_bytes);
		idx += req->u.dpcd_write.num_bytes;
		break;
	case DP_REMOTE_I2C_READ:
		buf[idx] = (req->u.i2c_read.port_number & 0xf) << 4;
		buf[idx] |= (req->u.i2c_read.num_transactions & 0x3);
		idx++;
		for (i = 0; i < (req->u.i2c_read.num_transactions & 0x3); i++) {
			buf[idx] = req->u.i2c_read.transactions[i].i2c_dev_id & 0x7f;
			idx++;
			buf[idx] = req->u.i2c_read.transactions[i].num_bytes;
			idx++;
			memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);
			idx += req->u.i2c_read.transactions[i].num_bytes;

			buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 5;
			buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);
			idx++;
		}
		buf[idx] = (req->u.i2c_read.read_i2c_device_id) & 0x7f;
		idx++;
		buf[idx] = (req->u.i2c_read.num_bytes_read);
		idx++;
		break;

	case DP_REMOTE_I2C_WRITE:
		buf[idx] = (req->u.i2c_write.port_number & 0xf) << 4;
		idx++;
		buf[idx] = (req->u.i2c_write.write_i2c_device_id) & 0x7f;
		idx++;
		buf[idx] = (req->u.i2c_write.num_bytes);
		idx++;
		memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
		idx += req->u.i2c_write.num_bytes;
		break;
	}
	raw->cur_len = idx;
}
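
/*
 * Example of the encoding above (a sketch, following the switch cases): a
 * DP_ENUM_PATH_RESOURCES request for port 2 produces the two-byte body
 * { DP_ENUM_PATH_RESOURCES, 0x20 }, i.e. the request type followed by the
 * port number in the high nibble; raw->cur_len is then 2, before the data
 * CRC is appended by drm_dp_crc_sideband_chunk_req() below.
 */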

static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len)
{
	u8 crc4;
	crc4 = drm_dp_msg_data_crc4(msg, len);
	msg[len] = crc4;
}
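
/*
 * So a len-byte chunk occupies len + 1 bytes on the wire; callers must
 * leave room for the trailing CRC byte in msg[].
 */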

static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *rep,
					 struct drm_dp_sideband_msg_tx *raw)
{
	int idx = 0;
	u8 *buf = raw->msg;

	buf[idx++] = (rep->reply_type & 0x1) << 7 | (rep->req_type & 0x7f);

	raw->cur_len = idx;
}

/* this adds a chunk of msg to the builder to get the final msg */
static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
				      u8 *replybuf, u8 replybuflen, bool hdr)
{
	int ret;
	u8 crc4;

	if (hdr) {
		u8 hdrlen;
		struct drm_dp_sideband_msg_hdr recv_hdr;
		ret = drm_dp_decode_sideband_msg_hdr(&recv_hdr, replybuf, replybuflen, &hdrlen);
		if (ret == false) {
			print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16, 1, replybuf, replybuflen, false);
			return false;
		}

		/* get length contained in this portion */
		msg->curchunk_len = recv_hdr.msg_len;
		msg->curchunk_hdrlen = hdrlen;

		/* we have already gotten an somt - don't bother parsing */
		if (recv_hdr.somt && msg->have_somt)
			return false;

		if (recv_hdr.somt) {
			memcpy(&msg->initial_hdr, &recv_hdr, sizeof(struct drm_dp_sideband_msg_hdr));
			msg->have_somt = true;
		}
		if (recv_hdr.eomt)
			msg->have_eomt = true;

		/* copy the bytes for the remainder of this header chunk */
		msg->curchunk_idx = min(msg->curchunk_len, (u8)(replybuflen - hdrlen));
		memcpy(&msg->chunk[0], replybuf + hdrlen, msg->curchunk_idx);
	} else {
		memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
		msg->curchunk_idx += replybuflen;
	}

	if (msg->curchunk_idx >= msg->curchunk_len) {
		/* do CRC */
		crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);
		/* copy chunk into bigger msg */
		memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1);
		msg->curlen += msg->curchunk_len - 1;
	}
	return true;
}
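
/*
 * Reassembly summary: the first chunk carries SOMT and its header is saved
 * as initial_hdr; later chunks are appended raw; the chunk carrying EOMT
 * completes the message. Each chunk's final body byte is the data CRC,
 * which is why only curchunk_len - 1 bytes are copied into msg->msg above.
 * Note that in this revision the computed crc4 is not actually compared
 * against the received byte.
 */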

static bool drm_dp_sideband_parse_link_address(struct drm_dp_sideband_msg_rx *raw,
					       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	int i;
	memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16);
	idx += 16;
	repmsg->u.link_addr.nports = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	for (i = 0; i < repmsg->u.link_addr.nports; i++) {
		if (raw->msg[idx] & 0x80)
			repmsg->u.link_addr.ports[i].input_port = 1;

		repmsg->u.link_addr.ports[i].peer_device_type = (raw->msg[idx] >> 4) & 0x7;
		repmsg->u.link_addr.ports[i].port_number = (raw->msg[idx] & 0xf);

		idx++;
		if (idx > raw->curlen)
			goto fail_len;
		repmsg->u.link_addr.ports[i].mcs = (raw->msg[idx] >> 7) & 0x1;
		repmsg->u.link_addr.ports[i].ddps = (raw->msg[idx] >> 6) & 0x1;
		if (repmsg->u.link_addr.ports[i].input_port == 0)
			repmsg->u.link_addr.ports[i].legacy_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
		idx++;
		if (idx > raw->curlen)
			goto fail_len;
		if (repmsg->u.link_addr.ports[i].input_port == 0) {
			repmsg->u.link_addr.ports[i].dpcd_revision = (raw->msg[idx]);
			idx++;
			if (idx > raw->curlen)
				goto fail_len;
			memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16);
			idx += 16;
			if (idx > raw->curlen)
				goto fail_len;
			repmsg->u.link_addr.ports[i].num_sdp_streams = (raw->msg[idx] >> 4) & 0xf;
			repmsg->u.link_addr.ports[i].num_sdp_stream_sinks = (raw->msg[idx] & 0xf);
			idx++;

		}
		if (idx > raw->curlen)
			goto fail_len;
	}

	return true;
fail_len:
	DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx *raw,
						   struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx];
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(repmsg->u.remote_dpcd_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_dpcd_read_ack.num_bytes);
	return true;
fail_len:
	DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_dpcd_write(struct drm_dp_sideband_msg_rx *raw,
						      struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_i2c_read_ack(struct drm_dp_sideband_msg_rx *raw,
						      struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.remote_i2c_read_ack.port_number = (raw->msg[idx] & 0xf);
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.remote_i2c_read_ack.num_bytes = raw->msg[idx];
	idx++;
	/* TODO check */
	memcpy(repmsg->u.remote_i2c_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_i2c_read_ack.num_bytes);
	return true;
fail_len:
	DRM_DEBUG_KMS("remote i2c reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband_msg_rx *raw,
							  struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.path_resources.full_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.path_resources.avail_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("enum resource parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_allocate_payload_ack(struct drm_dp_sideband_msg_rx *raw,
							  struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.allocate_payload.vcpi = raw->msg[idx];
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.allocate_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("allocate payload parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_query_payload_ack(struct drm_dp_sideband_msg_rx *raw,
						    struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.query_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("query payload parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
					struct drm_dp_sideband_msg_reply_body *msg)
{
	memset(msg, 0, sizeof(*msg));
	msg->reply_type = (raw->msg[0] & 0x80) >> 7;
	msg->req_type = (raw->msg[0] & 0x7f);

	if (msg->reply_type) {
		memcpy(msg->u.nak.guid, &raw->msg[1], 16);
		msg->u.nak.reason = raw->msg[17];
		msg->u.nak.nak_data = raw->msg[18];
		return false;
	}

	switch (msg->req_type) {
	case DP_LINK_ADDRESS:
		return drm_dp_sideband_parse_link_address(raw, msg);
	case DP_QUERY_PAYLOAD:
		return drm_dp_sideband_parse_query_payload_ack(raw, msg);
	case DP_REMOTE_DPCD_READ:
		return drm_dp_sideband_parse_remote_dpcd_read(raw, msg);
	case DP_REMOTE_DPCD_WRITE:
		return drm_dp_sideband_parse_remote_dpcd_write(raw, msg);
	case DP_REMOTE_I2C_READ:
		return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg);
	case DP_ENUM_PATH_RESOURCES:
		return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);
	case DP_ALLOCATE_PAYLOAD:
		return drm_dp_sideband_parse_allocate_payload_ack(raw, msg);
	default:
		DRM_ERROR("Got unknown reply 0x%02x\n", msg->req_type);
		return false;
	}
}

static bool drm_dp_sideband_parse_connection_status_notify(struct drm_dp_sideband_msg_rx *raw,
							   struct drm_dp_sideband_msg_req_body *msg)
{
	int idx = 1;

	msg->u.conn_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16);
	idx += 16;
	if (idx > raw->curlen)
		goto fail_len;

	msg->u.conn_stat.legacy_device_plug_status = (raw->msg[idx] >> 6) & 0x1;
	msg->u.conn_stat.displayport_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
	msg->u.conn_stat.message_capability_status = (raw->msg[idx] >> 4) & 0x1;
	msg->u.conn_stat.input_port = (raw->msg[idx] >> 3) & 0x1;
	msg->u.conn_stat.peer_device_type = (raw->msg[idx] & 0x7);
	idx++;
	return true;
fail_len:
	DRM_DEBUG_KMS("connection status reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_resource_status_notify(struct drm_dp_sideband_msg_rx *raw,
							   struct drm_dp_sideband_msg_req_body *msg)
{
	int idx = 1;

	msg->u.resource_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16);
	idx += 16;
	if (idx > raw->curlen)
		goto fail_len;

	msg->u.resource_stat.available_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx++;
	return true;
fail_len:
	DRM_DEBUG_KMS("resource status reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_req(struct drm_dp_sideband_msg_rx *raw,
				      struct drm_dp_sideband_msg_req_body *msg)
{
	memset(msg, 0, sizeof(*msg));
	msg->req_type = (raw->msg[0] & 0x7f);

	switch (msg->req_type) {
	case DP_CONNECTION_STATUS_NOTIFY:
		return drm_dp_sideband_parse_connection_status_notify(raw, msg);
	case DP_RESOURCE_STATUS_NOTIFY:
		return drm_dp_sideband_parse_resource_status_notify(raw, msg);
	default:
		DRM_ERROR("Got unknown request 0x%02x\n", msg->req_type);
		return false;
	}
}

static int build_dpcd_write(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_REMOTE_DPCD_WRITE;
	req.u.dpcd_write.port_number = port_num;
	req.u.dpcd_write.dpcd_address = offset;
	req.u.dpcd_write.num_bytes = num_bytes;
	req.u.dpcd_write.bytes = bytes;
	drm_dp_encode_sideband_req(&req, msg);

	return 0;
}

static int build_link_address(struct drm_dp_sideband_msg_tx *msg)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_LINK_ADDRESS;
	drm_dp_encode_sideband_req(&req, msg);
	return 0;
}

static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int port_num)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_ENUM_PATH_RESOURCES;
	req.u.port_num.port_number = port_num;
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
	return 0;
}

static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_num,
				  u8 vcpi, uint16_t pbn)
{
	struct drm_dp_sideband_msg_req_body req;
	memset(&req, 0, sizeof(req));
	req.req_type = DP_ALLOCATE_PAYLOAD;
	req.u.allocate_payload.port_number = port_num;
	req.u.allocate_payload.vcpi = vcpi;
	req.u.allocate_payload.pbn = pbn;
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
	return 0;
}
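
/*
 * Usage sketch for the build helpers above, with hypothetical values for
 * port_num, vcpi and pbn: to ask a branch to reserve bandwidth, a caller
 * fills a drm_dp_sideband_msg_tx and hands it to the tx queue, e.g.
 *
 *	struct drm_dp_sideband_msg_tx *txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
 *	if (txmsg) {
 *		txmsg->dst = mstb;
 *		build_allocate_payload(txmsg, port_num, vcpi, pbn);
 *		drm_dp_queue_down_tx(mgr, txmsg);
 *	}
 *
 * which mirrors how drm_dp_payload_send_msg() later in this file drives
 * them.
 */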

static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
					struct drm_dp_vcpi *vcpi)
{
	int ret, vcpi_ret;

	mutex_lock(&mgr->payload_lock);
	ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1);
	if (ret > mgr->max_payloads) {
		ret = -EINVAL;
		DRM_DEBUG_KMS("out of payload ids %d\n", ret);
		goto out_unlock;
	}

	vcpi_ret = find_first_zero_bit(&mgr->vcpi_mask, mgr->max_payloads + 1);
	if (vcpi_ret > mgr->max_payloads) {
		ret = -EINVAL;
		DRM_DEBUG_KMS("out of vcpi ids %d\n", ret);
		goto out_unlock;
	}

	set_bit(ret, &mgr->payload_mask);
	set_bit(vcpi_ret, &mgr->vcpi_mask);
	vcpi->vcpi = vcpi_ret + 1;
	mgr->proposed_vcpis[ret - 1] = vcpi;
out_unlock:
	mutex_unlock(&mgr->payload_lock);
	return ret;
}
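
/*
 * Two id spaces are tracked here: payload_mask indexes the payload table,
 * while vcpi_mask hands out VCPI numbers starting from 1 (vcpi_ret + 1).
 * This assumes the manager init sets bit 0 of payload_mask (payload id 0
 * is reserved), as the upstream helper does, so ret is always >= 1 and
 * proposed_vcpis[ret - 1] is the matching table slot.
 */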

static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr,
				      int vcpi)
{
	int i;
	if (vcpi == 0)
		return;

	mutex_lock(&mgr->payload_lock);
	DRM_DEBUG_KMS("putting payload %d\n", vcpi);
	clear_bit(vcpi - 1, &mgr->vcpi_mask);

	for (i = 0; i < mgr->max_payloads; i++) {
		if (mgr->proposed_vcpis[i])
			if (mgr->proposed_vcpis[i]->vcpi == vcpi) {
				mgr->proposed_vcpis[i] = NULL;
				clear_bit(i + 1, &mgr->payload_mask);
			}
	}
	mutex_unlock(&mgr->payload_lock);
}

static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
			      struct drm_dp_sideband_msg_tx *txmsg)
{
	bool ret;

	/*
	 * All updates to txmsg->state are protected by mgr->qlock, and the two
	 * cases we check here are terminal states. For those the barriers
	 * provided by the wake_up/wait_event pair are enough.
	 */
	ret = (txmsg->state == DRM_DP_SIDEBAND_TX_RX ||
	       txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT);
	return ret;
}

static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
				    struct drm_dp_sideband_msg_tx *txmsg)
{
	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
	int ret;

	ret = wait_event_timeout(mgr->tx_waitq,
				 check_txmsg_state(mgr, txmsg),
				 (4 * HZ));
	mutex_lock(&mstb->mgr->qlock);
	if (ret > 0) {
		if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) {
			ret = -EIO;
			goto out;
		}
	} else {
		DRM_DEBUG_KMS("timedout msg send %p %d %d\n", txmsg, txmsg->state, txmsg->seqno);

		/* dump some state */
		ret = -EIO;

		/* remove from q */
		if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED ||
		    txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND) {
			list_del(&txmsg->next);
		}

		if (txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
		    txmsg->state == DRM_DP_SIDEBAND_TX_SENT) {
			mstb->tx_slots[txmsg->seqno] = NULL;
		}
	}
out:
	mutex_unlock(&mgr->qlock);

	return ret;
}
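
/*
 * wait_event_timeout() returns 0 on timeout, so the else branch above is
 * the 4-second (4 * HZ) give-up path: the message is failed with -EIO and
 * unhooked from the queue and from its tx slot under mgr->qlock.
 */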

static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
{
	struct drm_dp_mst_branch *mstb;

	mstb = kzalloc(sizeof(*mstb), GFP_KERNEL);
	if (!mstb)
		return NULL;

	mstb->lct = lct;
	if (lct > 1)
		memcpy(mstb->rad, rad, lct / 2);
	INIT_LIST_HEAD(&mstb->ports);
	kref_init(&mstb->kref);
	return mstb;
}
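
/*
 * The RAD (relative address) copied above records the 4-bit port number
 * of each hop below the root, packed two per byte, so an lct-deep branch
 * needs lct / 2 bytes; the root branch (lct == 1) carries none.
 */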
808
 
808
 
809
static void drm_dp_free_mst_port(struct kref *kref);
809
static void drm_dp_free_mst_port(struct kref *kref);
810
 
810
 
811
static void drm_dp_free_mst_branch_device(struct kref *kref)
811
static void drm_dp_free_mst_branch_device(struct kref *kref)
812
{
812
{
813
	struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
813
	struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
814
	if (mstb->port_parent) {
814
	if (mstb->port_parent) {
815
		if (list_empty(&mstb->port_parent->next))
815
		if (list_empty(&mstb->port_parent->next))
816
			kref_put(&mstb->port_parent->kref, drm_dp_free_mst_port);
816
			kref_put(&mstb->port_parent->kref, drm_dp_free_mst_port);
817
	}
817
	}
818
	kfree(mstb);
818
	kfree(mstb);
819
}
819
}
820
 
820
 
821
static void drm_dp_destroy_mst_branch_device(struct kref *kref)
821
static void drm_dp_destroy_mst_branch_device(struct kref *kref)
822
{
822
{
823
	struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
823
	struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
824
	struct drm_dp_mst_port *port, *tmp;
824
	struct drm_dp_mst_port *port, *tmp;
825
	bool wake_tx = false;
825
	bool wake_tx = false;
826
 
826
 
827
	/*
827
	/*
828
	 * init kref again to be used by ports to remove mst branch when it is
828
	 * init kref again to be used by ports to remove mst branch when it is
829
	 * not needed anymore
829
	 * not needed anymore
830
	 */
830
	 */
831
	kref_init(kref);
831
	kref_init(kref);
832
 
832
 
833
	if (mstb->port_parent && list_empty(&mstb->port_parent->next))
833
	if (mstb->port_parent && list_empty(&mstb->port_parent->next))
834
		kref_get(&mstb->port_parent->kref);
834
		kref_get(&mstb->port_parent->kref);
835
 
835
 
836
	/*
836
	/*
837
	 * destroy all ports - don't need lock
837
	 * destroy all ports - don't need lock
838
	 * as there are no more references to the mst branch
838
	 * as there are no more references to the mst branch
839
	 * device at this point.
839
	 * device at this point.
840
	 */
840
	 */
841
	list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
841
	list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
842
		list_del(&port->next);
842
		list_del(&port->next);
843
		drm_dp_put_port(port);
843
		drm_dp_put_port(port);
844
	}
844
	}
845
 
845
 
846
	/* drop any tx slots msg */
846
	/* drop any tx slots msg */
847
	mutex_lock(&mstb->mgr->qlock);
847
	mutex_lock(&mstb->mgr->qlock);
848
	if (mstb->tx_slots[0]) {
848
	if (mstb->tx_slots[0]) {
849
		mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
849
		mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
850
		mstb->tx_slots[0] = NULL;
850
		mstb->tx_slots[0] = NULL;
851
		wake_tx = true;
851
		wake_tx = true;
852
	}
852
	}
853
	if (mstb->tx_slots[1]) {
853
	if (mstb->tx_slots[1]) {
854
		mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
854
		mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
855
		mstb->tx_slots[1] = NULL;
855
		mstb->tx_slots[1] = NULL;
856
		wake_tx = true;
856
		wake_tx = true;
857
	}
857
	}
858
	mutex_unlock(&mstb->mgr->qlock);
858
	mutex_unlock(&mstb->mgr->qlock);
859
 
859
 
860
//   if (wake_tx)
860
//   if (wake_tx)
861
//       wake_up(&mstb->mgr->tx_waitq);
861
//       wake_up(&mstb->mgr->tx_waitq);
862
	kref_put(kref, drm_dp_free_mst_branch_device);
862
	kref_put(kref, drm_dp_free_mst_branch_device);
863
}
863
}
864
 
864
 
865
static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb)
865
static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb)
866
{
866
{
867
	kref_put(&mstb->kref, drm_dp_destroy_mst_branch_device);
867
	kref_put(&mstb->kref, drm_dp_destroy_mst_branch_device);
868
}
868
}
869
 
869
 
870
 
870
 
static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt)
{
	struct drm_dp_mst_branch *mstb;

	switch (old_pdt) {
	case DP_PEER_DEVICE_DP_LEGACY_CONV:
	case DP_PEER_DEVICE_SST_SINK:
		/* remove i2c over sideband */
		drm_dp_mst_unregister_i2c_bus(&port->aux);
		break;
	case DP_PEER_DEVICE_MST_BRANCHING:
		mstb = port->mstb;
		port->mstb = NULL;
		drm_dp_put_mst_branch_device(mstb);
		break;
	}
}

static void drm_dp_destroy_port(struct kref *kref)
{
	struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;

	if (!port->input) {
		port->vcpi.num_slots = 0;

		kfree(port->cached_edid);

		/*
		 * The only time we don't have a connector
		 * on an output port is if the connector init
		 * fails.
		 */
		if (port->connector) {
			/* we can't destroy the connector here, as
			 * we might be holding the mode_config.mutex
			 * from an EDID retrieval */

			mutex_lock(&mgr->destroy_connector_lock);
			kref_get(&port->parent->kref);
			list_add(&port->next, &mgr->destroy_connector_list);
			mutex_unlock(&mgr->destroy_connector_lock);
//		schedule_work(&mgr->destroy_connector_work);
			return;
		}
		/* no need to clean up the vcpi here, as if we have no
		 * connector we never set up a vcpi */
		drm_dp_port_teardown_pdt(port, port->pdt);
	}
	kfree(port);
}

static void drm_dp_put_port(struct drm_dp_mst_port *port)
{
	kref_put(&port->kref, drm_dp_destroy_port);
}

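/*
 * The "validated ref" helpers below never trust a cached pointer: they
 * re-walk the topology from mgr->mst_primary under mgr->lock and only take
 * a kref if the branch or port is still reachable from the primary branch.
 * A NULL return therefore means the object has already been unplugged from
 * the topology and must not be used.
 */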
static struct drm_dp_mst_branch *drm_dp_mst_get_validated_mstb_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_branch *to_find)
{
	struct drm_dp_mst_port *port;
	struct drm_dp_mst_branch *rmstb;
	if (to_find == mstb) {
		kref_get(&mstb->kref);
		return mstb;
	}
	list_for_each_entry(port, &mstb->ports, next) {
		if (port->mstb) {
			rmstb = drm_dp_mst_get_validated_mstb_ref_locked(port->mstb, to_find);
			if (rmstb)
				return rmstb;
		}
	}
	return NULL;
}

static struct drm_dp_mst_branch *drm_dp_get_validated_mstb_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_branch *mstb)
{
	struct drm_dp_mst_branch *rmstb = NULL;
	mutex_lock(&mgr->lock);
	if (mgr->mst_primary)
		rmstb = drm_dp_mst_get_validated_mstb_ref_locked(mgr->mst_primary, mstb);
	mutex_unlock(&mgr->lock);
	return rmstb;
}

static struct drm_dp_mst_port *drm_dp_mst_get_port_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_port *to_find)
{
	struct drm_dp_mst_port *port, *mport;

	list_for_each_entry(port, &mstb->ports, next) {
		if (port == to_find) {
			kref_get(&port->kref);
			return port;
		}
		if (port->mstb) {
			mport = drm_dp_mst_get_port_ref_locked(port->mstb, to_find);
			if (mport)
				return mport;
		}
	}
	return NULL;
}

static struct drm_dp_mst_port *drm_dp_get_validated_port_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
{
	struct drm_dp_mst_port *rport = NULL;
	mutex_lock(&mgr->lock);
	if (mgr->mst_primary)
		rport = drm_dp_mst_get_port_ref_locked(mgr->mst_primary, port);
	mutex_unlock(&mgr->lock);
	return rport;
}

static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num)
{
	struct drm_dp_mst_port *port;

	list_for_each_entry(port, &mstb->ports, next) {
		if (port->port_num == port_num) {
			kref_get(&port->kref);
			return port;
		}
	}

	return NULL;
}

/*
 * calculate a new RAD for this MST branch device
 * if parent has an LCT of 2 then it has 1 nibble of RAD,
 * if parent has an LCT of 3 then it has 2 nibbles of RAD,
 */
static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
			       u8 *rad)
{
	int parent_lct = port->parent->lct;
	int shift = 4;
	int idx = (parent_lct - 1) / 2;
	if (parent_lct > 1) {
		memcpy(rad, port->parent->rad, idx + 1);
		shift = (parent_lct % 2) ? 4 : 0;
	} else
		rad[0] = 0;

	rad[idx] |= port->port_num << shift;
	return parent_lct + 1;
}

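/*
 * Worked example of the RAD packing above: a branch on port 2 of the
 * primary device (LCT 1) gets LCT 2 and rad[0] = 0x20 - the port number
 * lands in the high nibble. A branch on port 3 of *that* device then gets
 * LCT 3 and rad[0] = 0x23: each extra hop appends one more nibble, high
 * nibble first, two hops per RAD byte.
 */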
/*
 * returns true if the caller should send a LINK_ADDRESS request to the
 * newly created mst branch device
 */
static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port)
{
	int ret;
	u8 rad[6], lct;
	bool send_link = false;
	switch (port->pdt) {
	case DP_PEER_DEVICE_DP_LEGACY_CONV:
	case DP_PEER_DEVICE_SST_SINK:
		/* add i2c over sideband */
		ret = drm_dp_mst_register_i2c_bus(&port->aux);
		break;
	case DP_PEER_DEVICE_MST_BRANCHING:
		lct = drm_dp_calculate_rad(port, rad);

		port->mstb = drm_dp_add_mst_branch_device(lct, rad);
		port->mstb->mgr = port->mgr;
		port->mstb->port_parent = port;

		send_link = true;
		break;
	}
	return send_link;
}

static void drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
{
	int ret;

	memcpy(mstb->guid, guid, 16);

	if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
		if (mstb->port_parent) {
			ret = drm_dp_send_dpcd_write(mstb->mgr,
						     mstb->port_parent,
						     DP_GUID, 16, mstb->guid);
		} else {
			ret = drm_dp_dpcd_write(mstb->mgr->aux,
						DP_GUID, mstb->guid, 16);
		}
	}
}

static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
				int pnum,
				char *proppath,
				size_t proppath_size)
{
	int i;
	char temp[8];
	snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
	for (i = 0; i < (mstb->lct - 1); i++) {
		int shift = (i % 2) ? 0 : 4;
		int port_num = (mstb->rad[i / 2] >> shift) & 0xf;
		snprintf(temp, sizeof(temp), "-%d", port_num);
		strlcat(proppath, temp, proppath_size);
	}
	snprintf(temp, sizeof(temp), "-%d", pnum);
	strlcat(proppath, temp, proppath_size);
}

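/*
 * The property path above spells out a port's position in the tree. For
 * example, with conn_base_id 10, a branch reached through port 1 of the
 * primary device (LCT 2, rad[0] = 0x10) and a sink on its port 8 produces
 * "mst:10-1-8".
 */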
static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
			    struct device *dev,
			    struct drm_dp_link_addr_reply_port *port_msg)
{
	struct drm_dp_mst_port *port;
	bool ret;
	bool created = false;
	int old_pdt = 0;
	int old_ddps = 0;
	port = drm_dp_get_port(mstb, port_msg->port_number);
	if (!port) {
		port = kzalloc(sizeof(*port), GFP_KERNEL);
		if (!port)
			return;
		kref_init(&port->kref);
		port->parent = mstb;
		port->port_num = port_msg->port_number;
		port->mgr = mstb->mgr;
		port->aux.name = "DPMST";
		port->aux.dev = dev;
		created = true;
	} else {
		old_pdt = port->pdt;
		old_ddps = port->ddps;
	}

	port->pdt = port_msg->peer_device_type;
	port->input = port_msg->input_port;
	port->mcs = port_msg->mcs;
	port->ddps = port_msg->ddps;
	port->ldps = port_msg->legacy_device_plug_status;
	port->dpcd_rev = port_msg->dpcd_revision;
	port->num_sdp_streams = port_msg->num_sdp_streams;
	port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;

	/* manage mstb port lists with mgr lock - take a reference
	   for this list */
	if (created) {
		mutex_lock(&mstb->mgr->lock);
		kref_get(&port->kref);
		list_add(&port->next, &mstb->ports);
		mutex_unlock(&mstb->mgr->lock);
	}

	if (old_ddps != port->ddps) {
		if (port->ddps) {
			if (!port->input)
				drm_dp_send_enum_path_resources(mstb->mgr, mstb, port);
		} else {
			port->available_pbn = 0;
		}
	}

	if (old_pdt != port->pdt && !port->input) {
		drm_dp_port_teardown_pdt(port, old_pdt);

		ret = drm_dp_port_setup_pdt(port);
		if (ret == true)
			drm_dp_send_link_address(mstb->mgr, port->mstb);
	}

	if (created && !port->input) {
		char proppath[255];

		build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath));
		port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath);
		if (!port->connector) {
			/* remove it from the port list */
			mutex_lock(&mstb->mgr->lock);
			list_del(&port->next);
			mutex_unlock(&mstb->mgr->lock);
			/* drop port list reference */
			drm_dp_put_port(port);
			goto out;
		}
		if (port->port_num >= DP_MST_LOGICAL_PORT_0) {
			port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
			drm_mode_connector_set_tile_property(port->connector);
		}
		(*mstb->mgr->cbs->register_connector)(port->connector);
	}

out:
	/* put reference to this port */
	drm_dp_put_port(port);
}

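/*
 * drm_dp_add_port() above is driven by LINK_ADDRESS replies and is
 * deliberately idempotent: an already-known port is updated in place, and
 * only transitions (a ddps or peer-device-type change, or a freshly
 * created output port) trigger the expensive work - enumerating path
 * resources, re-doing the peer device type setup, or creating and
 * registering a connector.
 */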
static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
			       struct drm_dp_connection_status_notify *conn_stat)
{
	struct drm_dp_mst_port *port;
	int old_pdt;
	int old_ddps;
	bool dowork = false;
	port = drm_dp_get_port(mstb, conn_stat->port_number);
	if (!port)
		return;

	old_ddps = port->ddps;
	old_pdt = port->pdt;
	port->pdt = conn_stat->peer_device_type;
	port->mcs = conn_stat->message_capability_status;
	port->ldps = conn_stat->legacy_device_plug_status;
	port->ddps = conn_stat->displayport_device_plug_status;

	if (old_ddps != port->ddps) {
		if (port->ddps) {
			dowork = true;
		} else {
			port->available_pbn = 0;
		}
	}
	if (old_pdt != port->pdt && !port->input) {
		drm_dp_port_teardown_pdt(port, old_pdt);

		if (drm_dp_port_setup_pdt(port))
			dowork = true;
	}

	drm_dp_put_port(port);
//   if (dowork)
//       queue_work(system_long_wq, &mstb->mgr->work);

}

static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr,
							      u8 lct, u8 *rad)
{
	struct drm_dp_mst_branch *mstb;
	struct drm_dp_mst_port *port;
	int i;
	/* find the port by iterating down */

	mutex_lock(&mgr->lock);
	mstb = mgr->mst_primary;

	for (i = 0; i < lct - 1; i++) {
		int shift = (i % 2) ? 0 : 4;
		int port_num = (rad[i / 2] >> shift) & 0xf;

		list_for_each_entry(port, &mstb->ports, next) {
			if (port->port_num == port_num) {
				mstb = port->mstb;
				if (!mstb) {
					DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x\n", lct, rad[0]);
					goto out;
				}

				break;
			}
		}
	}
	kref_get(&mstb->kref);
out:
	mutex_unlock(&mgr->lock);
	return mstb;
}

static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
	struct drm_dp_mst_branch *mstb,
	uint8_t *guid)
{
	struct drm_dp_mst_branch *found_mstb;
	struct drm_dp_mst_port *port;

	if (memcmp(mstb->guid, guid, 16) == 0)
		return mstb;

	list_for_each_entry(port, &mstb->ports, next) {
		if (!port->mstb)
			continue;

		found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);

		if (found_mstb)
			return found_mstb;
	}

	return NULL;
}

static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device_by_guid(
	struct drm_dp_mst_topology_mgr *mgr,
	uint8_t *guid)
{
	struct drm_dp_mst_branch *mstb;

	/* find the branch device by iterating down from the primary */
	mutex_lock(&mgr->lock);

	mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);

	if (mstb)
		kref_get(&mstb->kref);

	mutex_unlock(&mgr->lock);
	return mstb;
}

static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
					       struct drm_dp_mst_branch *mstb)
{
	struct drm_dp_mst_port *port;
	struct drm_dp_mst_branch *mstb_child;
	if (!mstb->link_address_sent)
		drm_dp_send_link_address(mgr, mstb);

	list_for_each_entry(port, &mstb->ports, next) {
		if (port->input)
			continue;

		if (!port->ddps)
			continue;

		if (!port->available_pbn)
			drm_dp_send_enum_path_resources(mgr, mstb, port);

		if (port->mstb) {
			mstb_child = drm_dp_get_validated_mstb_ref(mgr, port->mstb);
			if (mstb_child) {
				drm_dp_check_and_send_link_address(mgr, mstb_child);
				drm_dp_put_mst_branch_device(mstb_child);
			}
		}
	}
}

static void drm_dp_mst_link_probe_work(struct work_struct *work)
{
	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, work);
	struct drm_dp_mst_branch *mstb;

	mutex_lock(&mgr->lock);
	mstb = mgr->mst_primary;
	if (mstb) {
		kref_get(&mstb->kref);
	}
	mutex_unlock(&mgr->lock);
	if (mstb) {
		drm_dp_check_and_send_link_address(mgr, mstb);
		drm_dp_put_mst_branch_device(mstb);
	}
}

static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
				 u8 *guid)
{
	static u8 zero_guid[16];

	if (!memcmp(guid, zero_guid, 16)) {
		u64 salt = get_jiffies_64();
		memcpy(&guid[0], &salt, sizeof(u64));
		memcpy(&guid[8], &salt, sizeof(u64));
		return false;
	}
	return true;
}

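/*
 * A branch device may report an all-zero GUID. drm_dp_validate_guid() then
 * fabricates one from the jiffies counter (the same 8-byte salt repeated
 * twice) and returns false, telling the caller the new GUID still has to
 * be written back to the device - see drm_dp_check_mstb_guid() above.
 */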
#if 0
static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_REMOTE_DPCD_READ;
	req.u.dpcd_read.port_number = port_num;
	req.u.dpcd_read.dpcd_address = offset;
	req.u.dpcd_read.num_bytes = num_bytes;
	drm_dp_encode_sideband_req(&req, msg);

	return 0;
}
#endif

static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
				    bool up, u8 *msg, int len)
{
	int ret;
	int regbase = up ? DP_SIDEBAND_MSG_UP_REP_BASE : DP_SIDEBAND_MSG_DOWN_REQ_BASE;
	int tosend, total, offset;
	int retries = 0;

retry:
	total = len;
	offset = 0;
	do {
		tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total);

		ret = drm_dp_dpcd_write(mgr->aux, regbase + offset,
					&msg[offset],
					tosend);
		if (ret != tosend) {
			if (ret == -EIO && retries < 5) {
				retries++;
				goto retry;
			}
			DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret);

			return -EIO;
		}
		offset += tosend;
		total -= tosend;
	} while (total > 0);
	return 0;
}

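/*
 * Sideband messages go out through the DPCD message windows in bursts of
 * at most 16 bytes, further capped by the AUX transaction size the device
 * advertises (max_dpcd_transaction_bytes). On -EIO the whole message is
 * retried from the start, up to five times, before the send is abandoned.
 */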
static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
				  struct drm_dp_sideband_msg_tx *txmsg)
{
	struct drm_dp_mst_branch *mstb = txmsg->dst;
	u8 req_type;

	/* both msg slots are full */
	if (txmsg->seqno == -1) {
		if (mstb->tx_slots[0] && mstb->tx_slots[1]) {
			DRM_DEBUG_KMS("%s: failed to find slot\n", __func__);
			return -EAGAIN;
		}
		if (mstb->tx_slots[0] == NULL && mstb->tx_slots[1] == NULL) {
			txmsg->seqno = mstb->last_seqno;
			mstb->last_seqno ^= 1;
		} else if (mstb->tx_slots[0] == NULL)
			txmsg->seqno = 0;
		else
			txmsg->seqno = 1;
		mstb->tx_slots[txmsg->seqno] = txmsg;
	}

	req_type = txmsg->msg[0] & 0x7f;
	if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
	    req_type == DP_RESOURCE_STATUS_NOTIFY)
		hdr->broadcast = 1;
	else
		hdr->broadcast = 0;
	hdr->path_msg = txmsg->path_msg;
	hdr->lct = mstb->lct;
	hdr->lcr = mstb->lct - 1;
	if (mstb->lct > 1)
		memcpy(hdr->rad, mstb->rad, mstb->lct / 2);
	hdr->seqno = txmsg->seqno;
	return 0;
}
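
/*
 * Each branch device has two sideband transaction slots, so at most two
 * messages can be in flight to it at a time. A queued message arrives here
 * with seqno == -1 and claims a slot: if both are free they are used
 * alternately via last_seqno, if exactly one is free it is taken, and if
 * both are busy the caller gets -EAGAIN and the message stays queued.
 */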
/*
 * process a single block of the next message in the sideband queue
 */
static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
				   struct drm_dp_sideband_msg_tx *txmsg,
				   bool up)
{
	u8 chunk[48];
	struct drm_dp_sideband_msg_hdr hdr;
	int len, space, idx, tosend;
	int ret;

	memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr));

	if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED) {
		txmsg->seqno = -1;
		txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND;
	}

	/* make hdr from dst mst - for replies use seqno
	   otherwise assign one */
	ret = set_hdr_from_dst_qlock(&hdr, txmsg);
	if (ret < 0)
		return ret;

	/* amount left to send in this message */
	len = txmsg->cur_len - txmsg->cur_offset;

	/* 48 - sideband msg size - 1 byte for data CRC, x header bytes */
	space = 48 - 1 - drm_dp_calc_sb_hdr_size(&hdr);

	tosend = min(len, space);
	if (len == txmsg->cur_len)
		hdr.somt = 1;
	if (space >= len)
		hdr.eomt = 1;

	hdr.msg_len = tosend + 1;
	drm_dp_encode_sideband_msg_hdr(&hdr, chunk, &idx);
	memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend);
	/* add crc at end */
	drm_dp_crc_sideband_chunk_req(&chunk[idx], tosend);
	idx += tosend + 1;

	ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
	if (ret) {
		DRM_DEBUG_KMS("sideband msg failed to send\n");
		return ret;
	}

	txmsg->cur_offset += tosend;
	if (txmsg->cur_offset == txmsg->cur_len) {
		txmsg->state = DRM_DP_SIDEBAND_TX_SENT;
		return 1;
	}
	return 0;
}

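/*
 * Framing summary for the chunking above: each chunk is at most 48 bytes -
 * encoded header, payload, then a single CRC byte. The first chunk of a
 * message sets SOMT (start of message transaction), the final chunk sets
 * EOMT, and a message larger than the per-chunk space keeps its tx slot
 * busy across repeated calls until cur_offset catches up with cur_len.
 */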
static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
{
	struct drm_dp_sideband_msg_tx *txmsg;
	int ret;

	WARN_ON(!mutex_is_locked(&mgr->qlock));

	/* construct a chunk from the first msg in the tx_msg queue */
	if (list_empty(&mgr->tx_msg_downq)) {
		mgr->tx_down_in_progress = false;
		return;
	}
	mgr->tx_down_in_progress = true;

	txmsg = list_first_entry(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next);
	ret = process_single_tx_qlock(mgr, txmsg, false);
	if (ret == 1) {
		/* txmsg is sent it should be in the slots now */
		list_del(&txmsg->next);
	} else if (ret) {
		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
		list_del(&txmsg->next);
		if (txmsg->seqno != -1)
			txmsg->dst->tx_slots[txmsg->seqno] = NULL;
		txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
//       wake_up(&mgr->tx_waitq);
	}
	if (list_empty(&mgr->tx_msg_downq)) {
		mgr->tx_down_in_progress = false;
		return;
	}
}

/* called holding qlock */
static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
				       struct drm_dp_sideband_msg_tx *txmsg)
{
	int ret;

	/* construct a chunk from the first msg in the tx_msg queue */
	ret = process_single_tx_qlock(mgr, txmsg, true);

	if (ret != 1)
		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);

	txmsg->dst->tx_slots[txmsg->seqno] = NULL;
}

static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_sideband_msg_tx *txmsg)
{
	mutex_lock(&mgr->qlock);
	list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
	if (!mgr->tx_down_in_progress)
		process_single_down_tx_qlock(mgr);
	mutex_unlock(&mgr->qlock);
}

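/*
 * Down requests are serialised through tx_msg_downq under qlock: the
 * sender kicks the queue immediately when no transmission is in progress,
 * otherwise the new message simply waits its turn behind the current one.
 */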
static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
				     struct drm_dp_mst_branch *mstb)
{
	int len;
	struct drm_dp_sideband_msg_tx *txmsg;
	int ret;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg)
		return;

	txmsg->dst = mstb;
	len = build_link_address(txmsg);

	mstb->link_address_sent = true;
	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
		int i;

		if (txmsg->reply.reply_type == 1)
			DRM_DEBUG_KMS("link address nak received\n");
		else {
			DRM_DEBUG_KMS("link address reply: %d\n", txmsg->reply.u.link_addr.nports);
			for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
				DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n", i,
				       txmsg->reply.u.link_addr.ports[i].input_port,
				       txmsg->reply.u.link_addr.ports[i].peer_device_type,
				       txmsg->reply.u.link_addr.ports[i].port_number,
				       txmsg->reply.u.link_addr.ports[i].dpcd_revision,
				       txmsg->reply.u.link_addr.ports[i].mcs,
				       txmsg->reply.u.link_addr.ports[i].ddps,
				       txmsg->reply.u.link_addr.ports[i].legacy_device_plug_status,
				       txmsg->reply.u.link_addr.ports[i].num_sdp_streams,
				       txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks);
			}

			drm_dp_check_mstb_guid(mstb, txmsg->reply.u.link_addr.guid);

			for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
				drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]);
			}
			(*mgr->cbs->hotplug)(mgr);
		}
	} else {
		mstb->link_address_sent = false;
		DRM_DEBUG_KMS("link address failed %d\n", ret);
	}

	kfree(txmsg);
}

static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
					   struct drm_dp_mst_branch *mstb,
					   struct drm_dp_mst_port *port)
{
	int len;
	struct drm_dp_sideband_msg_tx *txmsg;
	int ret;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg)
		return -ENOMEM;

	txmsg->dst = mstb;
	len = build_enum_path_resources(txmsg, port->port_num);

	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
		if (txmsg->reply.reply_type == 1)
			DRM_DEBUG_KMS("enum path resources nak received\n");
		else {
			if (port->port_num != txmsg->reply.u.path_resources.port_number)
				DRM_ERROR("got incorrect port in response\n");
			DRM_DEBUG_KMS("enum path resources %d: %d %d\n", txmsg->reply.u.path_resources.port_number, txmsg->reply.u.path_resources.full_payload_bw_number,
			       txmsg->reply.u.path_resources.avail_payload_bw_number);
			port->available_pbn = txmsg->reply.u.path_resources.avail_payload_bw_number;
		}
	}

	kfree(txmsg);
	return 0;
}

static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb)
{
	if (!mstb->port_parent)
		return NULL;

	if (mstb->port_parent->mstb != mstb)
		return mstb->port_parent;

	return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent);
}

static struct drm_dp_mst_branch *drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
									 struct drm_dp_mst_branch *mstb,
									 int *port_num)
{
	struct drm_dp_mst_branch *rmstb = NULL;
	struct drm_dp_mst_port *found_port;
	mutex_lock(&mgr->lock);
	if (mgr->mst_primary) {
		found_port = drm_dp_get_last_connected_port_to_mstb(mstb);

		if (found_port) {
			rmstb = found_port->parent;
			kref_get(&rmstb->kref);
			*port_num = found_port->port_num;
		}
	}
	mutex_unlock(&mgr->lock);
	return rmstb;
}

static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
				   struct drm_dp_mst_port *port,
				   int id,
				   int pbn)
{
	struct drm_dp_sideband_msg_tx *txmsg;
	struct drm_dp_mst_branch *mstb;
	int len, ret, port_num;

	port = drm_dp_get_validated_port_ref(mgr, port);
	if (!port)
		return -EINVAL;

	port_num = port->port_num;
	mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
	if (!mstb) {
		mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num);

		if (!mstb) {
			drm_dp_put_port(port);
			return -EINVAL;
		}
	}

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg) {
		ret = -ENOMEM;
		goto fail_put;
	}

	txmsg->dst = mstb;
	len = build_allocate_payload(txmsg, port_num,
				     id,
				     pbn);

	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
		if (txmsg->reply.reply_type == 1) {
			ret = -EINVAL;
		} else
			ret = 0;
	}
	kfree(txmsg);
fail_put:
	drm_dp_put_mst_branch_device(mstb);
	drm_dp_put_port(port);
	return ret;
}

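/*
 * Note that drm_dp_payload_send_msg() now pins the port with a validated
 * reference for its whole lifetime - this is the change introduced in this
 * revision. The port is looked up before use and only released on the
 * fail_put/exit path, so a concurrent unplug cannot free it while the
 * ALLOCATE_PAYLOAD request is still outstanding.
 */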
static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
				       int id,
				       struct drm_dp_payload *payload)
{
	int ret;

	ret = drm_dp_dpcd_write_payload(mgr, id, payload);
	if (ret < 0) {
		payload->payload_state = 0;
		return ret;
	}
	payload->payload_state = DP_PAYLOAD_LOCAL;
	return 0;
}

static int drm_dp_create_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
				       struct drm_dp_mst_port *port,
				       int id,
				       struct drm_dp_payload *payload)
{
	int ret;
	ret = drm_dp_payload_send_msg(mgr, port, id, port->vcpi.pbn);
	if (ret < 0)
		return ret;
	payload->payload_state = DP_PAYLOAD_REMOTE;
	return ret;
}

static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
					struct drm_dp_mst_port *port,
					int id,
					struct drm_dp_payload *payload)
{
	DRM_DEBUG_KMS("\n");
	/* it's okay for these to fail */
	if (port) {
		drm_dp_payload_send_msg(mgr, port, id, 0);
	}

	drm_dp_dpcd_write_payload(mgr, id, payload);
	payload->payload_state = DP_PAYLOAD_DELETE_LOCAL;
	return 0;
}

static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
					int id,
					struct drm_dp_payload *payload)
{
	payload->payload_state = 0;
	return 0;
}

1762
/**
1769
/**
1763
 * drm_dp_update_payload_part1() - Execute payload update part 1
1770
 * drm_dp_update_payload_part1() - Execute payload update part 1
1764
 * @mgr: manager to use.
1771
 * @mgr: manager to use.
1765
 *
1772
 *
1766
 * This iterates over all proposed virtual channels, and tries to
1773
 * This iterates over all proposed virtual channels, and tries to
1767
 * allocate space in the link for them. For 0->slots transitions,
1774
 * allocate space in the link for them. For 0->slots transitions,
1768
 * this step just writes the VCPI to the MST device. For slots->0
1775
 * this step just writes the VCPI to the MST device. For slots->0
1769
 * transitions, this writes the updated VCPIs and removes the
1776
 * transitions, this writes the updated VCPIs and removes the
1770
 * remote VC payloads.
1777
 * remote VC payloads.
1771
 *
1778
 *
1772
 * after calling this the driver should generate ACT and payload
1779
 * after calling this the driver should generate ACT and payload
1773
 * packets.
1780
 * packets.
1774
 */
1781
 */
1775
int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
1782
int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
1776
{
1783
{
1777
	int i, j;
1784
	int i, j;
1778
	int cur_slots = 1;
1785
	int cur_slots = 1;
1779
	struct drm_dp_payload req_payload;
1786
	struct drm_dp_payload req_payload;
1780
	struct drm_dp_mst_port *port;
1787
	struct drm_dp_mst_port *port;
1781
 
1788
 
1782
	mutex_lock(&mgr->payload_lock);
1789
	mutex_lock(&mgr->payload_lock);
1783
	for (i = 0; i < mgr->max_payloads; i++) {
1790
	for (i = 0; i < mgr->max_payloads; i++) {
1784
		/* solve the current payloads - compare to the hw ones
1791
		/* solve the current payloads - compare to the hw ones
1785
		   - update the hw view */
1792
		   - update the hw view */
1786
		req_payload.start_slot = cur_slots;
1793
		req_payload.start_slot = cur_slots;
1787
		if (mgr->proposed_vcpis[i]) {
1794
		if (mgr->proposed_vcpis[i]) {
1788
			port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
1795
			port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
-
 
1796
			port = drm_dp_get_validated_port_ref(mgr, port);
-
 
1797
			if (!port) {
-
 
1798
				mutex_unlock(&mgr->payload_lock);
-
 
1799
				return -EINVAL;
-
 
1800
			}
1789
			req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots;
1801
			req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots;
1790
		} else {
1802
		} else {
1791
			port = NULL;
1803
			port = NULL;
1792
			req_payload.num_slots = 0;
1804
			req_payload.num_slots = 0;
1793
		}
1805
		}
1794
 
1806
 
1795
		if (mgr->payloads[i].start_slot != req_payload.start_slot) {
1807
		if (mgr->payloads[i].start_slot != req_payload.start_slot) {
1796
			mgr->payloads[i].start_slot = req_payload.start_slot;
1808
			mgr->payloads[i].start_slot = req_payload.start_slot;
1797
		}
1809
		}
1798
		/* work out what is required to happen with this payload */
1810
		/* work out what is required to happen with this payload */
1799
		if (mgr->payloads[i].num_slots != req_payload.num_slots) {
1811
		if (mgr->payloads[i].num_slots != req_payload.num_slots) {
1800
 
1812
 
1801
			/* need to push an update for this payload */
1813
			/* need to push an update for this payload */
1802
			if (req_payload.num_slots) {
1814
			if (req_payload.num_slots) {
1803
				drm_dp_create_payload_step1(mgr, mgr->proposed_vcpis[i]->vcpi, &req_payload);
1815
				drm_dp_create_payload_step1(mgr, mgr->proposed_vcpis[i]->vcpi, &req_payload);
1804
				mgr->payloads[i].num_slots = req_payload.num_slots;
1816
				mgr->payloads[i].num_slots = req_payload.num_slots;
1805
			} else if (mgr->payloads[i].num_slots) {
1817
			} else if (mgr->payloads[i].num_slots) {
1806
				mgr->payloads[i].num_slots = 0;
1818
				mgr->payloads[i].num_slots = 0;
1807
				drm_dp_destroy_payload_step1(mgr, port, port->vcpi.vcpi, &mgr->payloads[i]);
1819
				drm_dp_destroy_payload_step1(mgr, port, port->vcpi.vcpi, &mgr->payloads[i]);
1808
				req_payload.payload_state = mgr->payloads[i].payload_state;
1820
				req_payload.payload_state = mgr->payloads[i].payload_state;
1809
				mgr->payloads[i].start_slot = 0;
1821
				mgr->payloads[i].start_slot = 0;
1810
			}
1822
			}
1811
			mgr->payloads[i].payload_state = req_payload.payload_state;
1823
			mgr->payloads[i].payload_state = req_payload.payload_state;
1812
		}
1824
		}
1813
		cur_slots += req_payload.num_slots;
1825
		cur_slots += req_payload.num_slots;
-
 
1826
 
-
 
1827
		if (port)
-
 
1828
			drm_dp_put_port(port);
1814
	}
1829
	}
1815
 
1830
 
1816
	for (i = 0; i < mgr->max_payloads; i++) {
1831
	for (i = 0; i < mgr->max_payloads; i++) {
1817
		if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
1832
		if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
1818
			DRM_DEBUG_KMS("removing payload %d\n", i);
1833
			DRM_DEBUG_KMS("removing payload %d\n", i);
1819
			for (j = i; j < mgr->max_payloads - 1; j++) {
1834
			for (j = i; j < mgr->max_payloads - 1; j++) {
1820
				memcpy(&mgr->payloads[j], &mgr->payloads[j + 1], sizeof(struct drm_dp_payload));
1835
				memcpy(&mgr->payloads[j], &mgr->payloads[j + 1], sizeof(struct drm_dp_payload));
1821
				mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1];
1836
				mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1];
1822
				if (mgr->proposed_vcpis[j] && mgr->proposed_vcpis[j]->num_slots) {
1837
				if (mgr->proposed_vcpis[j] && mgr->proposed_vcpis[j]->num_slots) {
1823
					set_bit(j + 1, &mgr->payload_mask);
1838
					set_bit(j + 1, &mgr->payload_mask);
1824
				} else {
1839
				} else {
1825
					clear_bit(j + 1, &mgr->payload_mask);
1840
					clear_bit(j + 1, &mgr->payload_mask);
1826
				}
1841
				}
1827
			}
1842
			}
1828
			memset(&mgr->payloads[mgr->max_payloads - 1], 0, sizeof(struct drm_dp_payload));
1843
			memset(&mgr->payloads[mgr->max_payloads - 1], 0, sizeof(struct drm_dp_payload));
1829
			mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL;
1844
			mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL;
1830
			clear_bit(mgr->max_payloads, &mgr->payload_mask);
1845
			clear_bit(mgr->max_payloads, &mgr->payload_mask);
1831
 
1846
 
1832
		}
1847
		}
1833
	}
1848
	}
1834
	mutex_unlock(&mgr->payload_lock);
1849
	mutex_unlock(&mgr->payload_lock);
1835
 
1850
 
1836
	return 0;
1851
	return 0;
1837
}
1852
}
1838
EXPORT_SYMBOL(drm_dp_update_payload_part1);
1853
EXPORT_SYMBOL(drm_dp_update_payload_part1);
1839
 
1854
 
/**
 * drm_dp_update_payload_part2() - Execute payload update part 2
 * @mgr: manager to use.
 *
 * This iterates over all proposed virtual channels, and tries to
 * allocate space in the link for them. For 0->slots transitions,
 * this step writes the remote VC payload commands. For slots->0
 * transitions, this just resets some internal state.
 */
int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr)
{
	struct drm_dp_mst_port *port;
	int i;
	int ret = 0;
	mutex_lock(&mgr->payload_lock);
	for (i = 0; i < mgr->max_payloads; i++) {

		if (!mgr->proposed_vcpis[i])
			continue;

		port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);

		DRM_DEBUG_KMS("payload %d %d\n", i, mgr->payloads[i].payload_state);
		if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) {
			ret = drm_dp_create_payload_step2(mgr, port, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
		} else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
			ret = drm_dp_destroy_payload_step2(mgr, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
		}
		if (ret) {
			mutex_unlock(&mgr->payload_lock);
			return ret;
		}
	}
	mutex_unlock(&mgr->payload_lock);
	return 0;
}
EXPORT_SYMBOL(drm_dp_update_payload_part2);

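/*
 * A rough sketch of how a driver is expected to sequence the two payload
 * update halves around the allocation change trigger (ACT).  The
 * my_hw_trigger_act() call is an illustrative placeholder for whatever
 * source-side programming emits the ACT sequence; that part is driver
 * hardware specific and not covered by these helpers:
 *
 *	drm_dp_update_payload_part1(mgr);	// program the VC payload table
 *	my_hw_trigger_act(dev);			// driver-specific ACT trigger
 *	if (drm_dp_check_act_status(mgr) == 0)
 *		drm_dp_update_payload_part2(mgr);	// sideband payload messages
 */
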
#if 0 /* unused as of yet */
static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port,
				 int offset, int size)
{
	int len;
	struct drm_dp_sideband_msg_tx *txmsg;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg)
		return -ENOMEM;

	len = build_dpcd_read(txmsg, port->port_num, offset, size);
	txmsg->dst = port->parent;

	drm_dp_queue_down_tx(mgr, txmsg);

	return 0;
}
#endif

static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
				  struct drm_dp_mst_port *port,
				  int offset, int size, u8 *bytes)
{
	int len;
	int ret;
	struct drm_dp_sideband_msg_tx *txmsg;
	struct drm_dp_mst_branch *mstb;

	mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
	if (!mstb)
		return -EINVAL;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg) {
		ret = -ENOMEM;
		goto fail_put;
	}

	len = build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
	txmsg->dst = mstb;

	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
		if (txmsg->reply.reply_type == 1)
			ret = -EINVAL;
		else
			ret = 0;
	}
	kfree(txmsg);
fail_put:
	drm_dp_put_mst_branch_device(mstb);
	return ret;
}

static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type)
{
	struct drm_dp_sideband_msg_reply_body reply;

	reply.reply_type = 1;
	reply.req_type = req_type;
	drm_dp_encode_sideband_reply(&reply, msg);
	return 0;
}

static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
				    struct drm_dp_mst_branch *mstb,
				    int req_type, int seqno, bool broadcast)
{
	struct drm_dp_sideband_msg_tx *txmsg;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg)
		return -ENOMEM;

	txmsg->dst = mstb;
	txmsg->seqno = seqno;
	drm_dp_encode_up_ack_reply(txmsg, req_type);

	mutex_lock(&mgr->qlock);

	process_single_up_tx_qlock(mgr, txmsg);

	mutex_unlock(&mgr->qlock);

	kfree(txmsg);
	return 0;
}

static bool drm_dp_get_vc_payload_bw(int dp_link_bw,
				     int dp_link_count,
				     int *out)
{
	switch (dp_link_bw) {
	default:
		DRM_DEBUG_KMS("invalid link bandwidth in DPCD: %x (link count: %d)\n",
			      dp_link_bw, dp_link_count);
		return false;

	case DP_LINK_BW_1_62:
		*out = 3 * dp_link_count;
		break;
	case DP_LINK_BW_2_7:
		*out = 5 * dp_link_count;
		break;
	case DP_LINK_BW_5_4:
		*out = 10 * dp_link_count;
		break;
	}
	return true;
}

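/*
 * A worked example of the mapping above: on an HBR2 (5.4 Gbps) link
 * trained with 4 lanes, *out becomes 10 * 4 = 40, i.e. one MTP timeslot
 * on that link carries 40 PBN of bandwidth.  This value ends up in
 * mgr->pbn_div, so a stream needing 532 PBN would occupy
 * DIV_ROUND_UP(532, 40) = 14 timeslots.
 */
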
/**
 * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager
 * @mgr: manager to set state for
 * @mst_state: true to enable MST on this connector - false to disable.
 *
 * This is called by the driver when it detects an MST capable device plugged
 * into a DP MST capable port, or when a DP MST capable device is unplugged.
 */
int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
{
	int ret = 0;
	struct drm_dp_mst_branch *mstb = NULL;

	mutex_lock(&mgr->lock);
	if (mst_state == mgr->mst_state)
		goto out_unlock;

	mgr->mst_state = mst_state;
	/* set the device into MST mode */
	if (mst_state) {
		WARN_ON(mgr->mst_primary);

		/* get dpcd info */
		ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
		if (ret != DP_RECEIVER_CAP_SIZE) {
			DRM_DEBUG_KMS("failed to read DPCD\n");
			goto out_unlock;
		}

		if (!drm_dp_get_vc_payload_bw(mgr->dpcd[1],
					      mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK,
					      &mgr->pbn_div)) {
			ret = -EINVAL;
			goto out_unlock;
		}

		mgr->total_pbn = 2560;
		mgr->total_slots = DIV_ROUND_UP(mgr->total_pbn, mgr->pbn_div);
		mgr->avail_slots = mgr->total_slots;

		/* add initial branch device at LCT 1 */
		mstb = drm_dp_add_mst_branch_device(1, NULL);
		if (mstb == NULL) {
			ret = -ENOMEM;
			goto out_unlock;
		}
		mstb->mgr = mgr;

		/* give this the main reference */
		mgr->mst_primary = mstb;
		kref_get(&mgr->mst_primary->kref);

		ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
					 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
		if (ret < 0) {
			goto out_unlock;
		}

		{
			struct drm_dp_payload reset_pay;
			reset_pay.start_slot = 0;
			reset_pay.num_slots = 0x3f;
			drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
		}

//       queue_work(system_long_wq, &mgr->work);

		ret = 0;
	} else {
		/* disable MST on the device */
		mstb = mgr->mst_primary;
		mgr->mst_primary = NULL;
		/* this can fail if the device is gone */
		drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
		ret = 0;
		memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload));
		mgr->payload_mask = 0;
		set_bit(0, &mgr->payload_mask);
		mgr->vcpi_mask = 0;
	}

out_unlock:
	mutex_unlock(&mgr->lock);
	if (mstb)
		drm_dp_put_mst_branch_device(mstb);
	return ret;

}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst);

/**
 * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
 * @mgr: manager to suspend
 *
 * This function tells the MST device that we can't handle UP messages
 * anymore. This should stop it from sending any since we are suspended.
 */
void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
{
	mutex_lock(&mgr->lock);
	drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
			   DP_MST_EN | DP_UPSTREAM_IS_SRC);
	mutex_unlock(&mgr->lock);
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);

/**
 * drm_dp_mst_topology_mgr_resume() - resume the MST manager
 * @mgr: manager to resume
 *
 * This will fetch DPCD and see if the device is still there,
 * if it is, it will rewrite the MSTM control bits, and return.
 *
 * If the device fails this returns -1, and the driver should do
 * a full MST reprobe, in case we were undocked.
 */
int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
{
	int ret = 0;

	mutex_lock(&mgr->lock);

	if (mgr->mst_primary) {
		int sret;
		u8 guid[16];

		sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
		if (sret != DP_RECEIVER_CAP_SIZE) {
			DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
			ret = -1;
			goto out_unlock;
		}

		ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
					 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
		if (ret < 0) {
			DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n");
			ret = -1;
			goto out_unlock;
		}

		/* Some hubs forget their guids after they resume */
		sret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
		if (sret != 16) {
			DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
			ret = -1;
			goto out_unlock;
		}
		drm_dp_check_mstb_guid(mgr->mst_primary, guid);

		ret = 0;
	} else
		ret = -1;

out_unlock:
	mutex_unlock(&mgr->lock);
	return ret;
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);

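/*
 * A minimal sketch of the driver-side suspend/resume flow the two helpers
 * above are designed for; my_driver_full_reprobe() is an illustrative
 * placeholder for the driver's own topology reprobe path:
 *
 *	// system suspend
 *	drm_dp_mst_topology_mgr_suspend(mgr);
 *
 *	// system resume
 *	if (drm_dp_mst_topology_mgr_resume(mgr) < 0) {
 *		// device vanished, e.g. we were undocked
 *		drm_dp_mst_topology_mgr_set_mst(mgr, false);
 *		my_driver_full_reprobe(dev);
 *	}
 */
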
static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
{
	int len;
	u8 replyblock[32];
	int replylen, origlen, curreply;
	int ret;
	struct drm_dp_sideband_msg_rx *msg;
	int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE : DP_SIDEBAND_MSG_DOWN_REP_BASE;
	msg = up ? &mgr->up_req_recv : &mgr->down_rep_recv;

	len = min(mgr->max_dpcd_transaction_bytes, 16);
	ret = drm_dp_dpcd_read(mgr->aux, basereg,
			       replyblock, len);
	if (ret != len) {
		DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret);
		return;
	}
	ret = drm_dp_sideband_msg_build(msg, replyblock, len, true);
	if (!ret) {
		DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
		return;
	}
	replylen = msg->curchunk_len + msg->curchunk_hdrlen;

	origlen = replylen;
	replylen -= len;
	curreply = len;
	while (replylen > 0) {
		len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
		ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
				       replyblock, len);
		if (ret != len) {
			DRM_DEBUG_KMS("failed to read a chunk\n");
		}
		ret = drm_dp_sideband_msg_build(msg, replyblock, len, false);
		if (!ret)
			DRM_DEBUG_KMS("failed to build sideband msg\n");
		curreply += len;
		replylen -= len;
	}
}

static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
{
	int ret = 0;

	drm_dp_get_one_sb_msg(mgr, false);

	if (mgr->down_rep_recv.have_eomt) {
		struct drm_dp_sideband_msg_tx *txmsg;
		struct drm_dp_mst_branch *mstb;
		int slot = -1;
		mstb = drm_dp_get_mst_branch_device(mgr,
						    mgr->down_rep_recv.initial_hdr.lct,
						    mgr->down_rep_recv.initial_hdr.rad);

		if (!mstb) {
			DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->down_rep_recv.initial_hdr.lct);
			memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
			return 0;
		}

		/* find the message */
		slot = mgr->down_rep_recv.initial_hdr.seqno;
		mutex_lock(&mgr->qlock);
		txmsg = mstb->tx_slots[slot];
		/* remove from slots */
		mutex_unlock(&mgr->qlock);

		if (!txmsg) {
			DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n",
				      mstb,
				      mgr->down_rep_recv.initial_hdr.seqno,
				      mgr->down_rep_recv.initial_hdr.lct,
				      mgr->down_rep_recv.initial_hdr.rad[0],
				      mgr->down_rep_recv.msg[0]);
			drm_dp_put_mst_branch_device(mstb);
			memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
			return 0;
		}

		drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply);
		if (txmsg->reply.reply_type == 1) {
			DRM_DEBUG_KMS("Got NAK reply: req 0x%02x, reason 0x%02x, nak data 0x%02x\n", txmsg->reply.req_type, txmsg->reply.u.nak.reason, txmsg->reply.u.nak.nak_data);
		}

		memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
		drm_dp_put_mst_branch_device(mstb);

		mutex_lock(&mgr->qlock);
		txmsg->state = DRM_DP_SIDEBAND_TX_RX;
		mstb->tx_slots[slot] = NULL;
		mutex_unlock(&mgr->qlock);

//       wake_up(&mgr->tx_waitq);
	}
	return ret;
}

static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
{
	int ret = 0;
	drm_dp_get_one_sb_msg(mgr, true);

	if (mgr->up_req_recv.have_eomt) {
		struct drm_dp_sideband_msg_req_body msg;
		struct drm_dp_mst_branch *mstb = NULL;
		bool seqno;

		if (!mgr->up_req_recv.initial_hdr.broadcast) {
			mstb = drm_dp_get_mst_branch_device(mgr,
							    mgr->up_req_recv.initial_hdr.lct,
							    mgr->up_req_recv.initial_hdr.rad);
			if (!mstb) {
				DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
				memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
				return 0;
			}
		}

		seqno = mgr->up_req_recv.initial_hdr.seqno;
		drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg);

		if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
			drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);

			if (!mstb)
				mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.conn_stat.guid);

			if (!mstb) {
				DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
				memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
				return 0;
			}

			drm_dp_update_port(mstb, &msg.u.conn_stat);

			DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
			(*mgr->cbs->hotplug)(mgr);

		} else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
			drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
			if (!mstb)
				mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.resource_stat.guid);

			if (!mstb) {
				DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
				memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
				return 0;
			}

			DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn);
		}

		drm_dp_put_mst_branch_device(mstb);
		memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
	}
	return ret;
}

/**
 * drm_dp_mst_hpd_irq() - MST hotplug IRQ notify
 * @mgr: manager to notify irq for.
 * @esi: 4 bytes from SINK_COUNT_ESI
 * @handled: whether the hpd interrupt was consumed or not
 *
 * This should be called from the driver when it detects a short IRQ,
 * along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The
 * topology manager will process the sideband messages received as a result
 * of this.
 */
int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled)
{
	int ret = 0;
	int sc;
	*handled = false;
	sc = esi[0] & 0x3f;

	if (sc != mgr->sink_count) {
		mgr->sink_count = sc;
		*handled = true;
	}

	if (esi[1] & DP_DOWN_REP_MSG_RDY) {
		ret = drm_dp_mst_handle_down_rep(mgr);
		*handled = true;
	}

	if (esi[1] & DP_UP_REQ_MSG_RDY) {
		ret |= drm_dp_mst_handle_up_req(mgr);
		*handled = true;
	}

	drm_dp_mst_kick_tx(mgr);
	return ret;
}
EXPORT_SYMBOL(drm_dp_mst_hpd_irq);

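/*
 * For reference, a sketch of the short-pulse handler side, loosely
 * following what the i915 driver does; the exact number of ESI bytes to
 * read and the layout of the ack write should be checked against the
 * sink's ESI register definitions:
 *
 *	u8 esi[16];
 *	bool handled;
 *
 *	drm_dp_dpcd_read(mgr->aux, DP_SINK_COUNT_ESI, esi, 14);
 *	drm_dp_mst_hpd_irq(mgr, esi, &handled);
 *	if (handled)
 *		drm_dp_dpcd_write(mgr->aux, DP_SINK_COUNT_ESI + 1, &esi[1], 3);  // ack
 */
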
/**
 * drm_dp_mst_detect_port() - get connection status for an MST port
 * @connector: toplevel connector for this port
 * @mgr: manager for this port
 * @port: unverified pointer to a port
 *
 * This returns the current connection state for a port. It validates the
 * port pointer still exists so the caller doesn't require a reference.
 */
enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector,
						 struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
{
	enum drm_connector_status status = connector_status_disconnected;

	/* we need to search for the port in the mgr in case it's gone */
	port = drm_dp_get_validated_port_ref(mgr, port);
	if (!port)
		return connector_status_disconnected;

	if (!port->ddps)
		goto out;

	switch (port->pdt) {
	case DP_PEER_DEVICE_NONE:
	case DP_PEER_DEVICE_MST_BRANCHING:
		break;

	case DP_PEER_DEVICE_SST_SINK:
		status = connector_status_connected;
		/* for logical ports - cache the EDID */
		if (port->port_num >= 8 && !port->cached_edid) {
			port->cached_edid = drm_get_edid(connector, &port->aux.ddc);
		}
		break;
	case DP_PEER_DEVICE_DP_LEGACY_CONV:
		if (port->ldps)
			status = connector_status_connected;
		break;
	}
out:
	drm_dp_put_port(port);
	return status;
}
EXPORT_SYMBOL(drm_dp_mst_detect_port);

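/*
 * This is normally wired straight into the connector's ->detect() hook; a
 * minimal sketch, assuming the driver's connector wrapper carries the MST
 * manager and port pointers (the my_* names are illustrative only):
 *
 *	static enum drm_connector_status
 *	my_mst_detect(struct drm_connector *connector, bool force)
 *	{
 *		struct my_connector *c = to_my_connector(connector);
 *
 *		return drm_dp_mst_detect_port(connector, c->mst_mgr, c->mst_port);
 *	}
 */
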
/**
 * drm_dp_mst_get_edid() - get EDID for an MST port
 * @connector: toplevel connector to get EDID for
 * @mgr: manager for this port
 * @port: unverified pointer to a port.
 *
 * This returns an EDID for the port connected to a connector.
 * It validates the pointer still exists so the caller doesn't require a
 * reference.
 */
struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
{
	struct edid *edid = NULL;

	/* we need to search for the port in the mgr in case it's gone */
	port = drm_dp_get_validated_port_ref(mgr, port);
	if (!port)
		return NULL;

	if (port->cached_edid)
		edid = drm_edid_duplicate(port->cached_edid);
	else {
		edid = drm_get_edid(connector, &port->aux.ddc);
		drm_mode_connector_set_tile_property(connector);
	}
	drm_dp_put_port(port);
	return edid;
}
EXPORT_SYMBOL(drm_dp_mst_get_edid);

/**
 * drm_dp_find_vcpi_slots() - find slots for this PBN value
 * @mgr: manager to use
 * @pbn: payload bandwidth to convert into slots.
 */
int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
			   int pbn)
{
	int num_slots;

	num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);

	if (num_slots > mgr->avail_slots)
		return -ENOSPC;
	return num_slots;
}
EXPORT_SYMBOL(drm_dp_find_vcpi_slots);

static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
			    struct drm_dp_vcpi *vcpi, int pbn)
{
	int num_slots;
	int ret;

	num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);

	if (num_slots > mgr->avail_slots)
		return -ENOSPC;

	vcpi->pbn = pbn;
	vcpi->aligned_pbn = num_slots * mgr->pbn_div;
	vcpi->num_slots = num_slots;

	ret = drm_dp_mst_assign_payload_id(mgr, vcpi);
	if (ret < 0)
		return ret;
	return 0;
}

/**
 * drm_dp_mst_allocate_vcpi() - Allocate a virtual channel
 * @mgr: manager for this port
 * @port: port to allocate a virtual channel for.
 * @pbn: payload bandwidth number to request
 * @slots: returned number of slots for this PBN.
 */
bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, int pbn, int *slots)
{
	int ret;

	port = drm_dp_get_validated_port_ref(mgr, port);
	if (!port)
		return false;

	if (port->vcpi.vcpi > 0) {
		DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", port->vcpi.vcpi, port->vcpi.pbn, pbn);
		if (pbn == port->vcpi.pbn) {
			*slots = port->vcpi.num_slots;
			drm_dp_put_port(port);
			return true;
		}
	}

	ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn);
	if (ret) {
		DRM_DEBUG_KMS("failed to init vcpi %d %d %d\n", DIV_ROUND_UP(pbn, mgr->pbn_div), mgr->avail_slots, ret);
		goto out;
	}
	DRM_DEBUG_KMS("initing vcpi for %d %d\n", pbn, port->vcpi.num_slots);
	*slots = port->vcpi.num_slots;

	drm_dp_put_port(port);
	return true;
out:
	return false;
}
EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi);

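/*
 * How the allocation helpers fit together on the enable path, as a rough
 * sketch with error handling elided (my_hw_trigger_act() is again a
 * placeholder for the driver's ACT trigger):
 *
 *	pbn = drm_dp_calc_pbn_mode(mode_clock, bpp);
 *	slots = drm_dp_find_vcpi_slots(mgr, pbn);	// optional pre-check
 *	drm_dp_mst_allocate_vcpi(mgr, port, pbn, &slots);
 *	drm_dp_update_payload_part1(mgr);
 *	my_hw_trigger_act(dev);
 *	drm_dp_check_act_status(mgr);
 *	drm_dp_update_payload_part2(mgr);
 */
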
int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
{
	int slots = 0;
	port = drm_dp_get_validated_port_ref(mgr, port);
	if (!port)
		return slots;

	slots = port->vcpi.num_slots;
	drm_dp_put_port(port);
	return slots;
}
EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots);

/**
 * drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI
 * @mgr: manager for this port
 * @port: unverified pointer to a port.
 *
 * This just resets the number of slots for the port's VCPI for later programming.
 */
void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
{
	port = drm_dp_get_validated_port_ref(mgr, port);
	if (!port)
		return;
	port->vcpi.num_slots = 0;
	drm_dp_put_port(port);
}
EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots);

/**
 * drm_dp_mst_deallocate_vcpi() - deallocate a VCPI
 * @mgr: manager for this port
 * @port: unverified port to deallocate vcpi for
 */
void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
{
	port = drm_dp_get_validated_port_ref(mgr, port);
	if (!port)
		return;

	drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
	port->vcpi.num_slots = 0;
	port->vcpi.pbn = 0;
	port->vcpi.aligned_pbn = 0;
	port->vcpi.vcpi = 0;
	drm_dp_put_port(port);
}
EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi);

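/*
 * The disable path mirrors the enable path; a sketch of the expected
 * ordering, with the ACT trigger once more left to the driver:
 *
 *	drm_dp_mst_reset_vcpi_slots(mgr, port);	// mark the payload for deletion
 *	drm_dp_update_payload_part1(mgr);	// shuffles the VC table down
 *	my_hw_trigger_act(dev);
 *	drm_dp_check_act_status(mgr);
 *	drm_dp_update_payload_part2(mgr);
 *	drm_dp_mst_deallocate_vcpi(mgr, port);	// finally release the VCPI id
 */
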
static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
				     int id, struct drm_dp_payload *payload)
{
	u8 payload_alloc[3], status;
	int ret;
	int retries = 0;

	drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
			   DP_PAYLOAD_TABLE_UPDATED);

	payload_alloc[0] = id;
	payload_alloc[1] = payload->start_slot;
	payload_alloc[2] = payload->num_slots;

	ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
	if (ret != 3) {
		DRM_DEBUG_KMS("failed to write payload allocation %d\n", ret);
		goto fail;
	}

retry:
	ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
	if (ret < 0) {
		DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
		goto fail;
	}

	if (!(status & DP_PAYLOAD_TABLE_UPDATED)) {
		retries++;
		if (retries < 20) {
			usleep_range(10000, 20000);
			goto retry;
		}
		DRM_DEBUG_KMS("status not set after read payload table status %d\n", status);
		ret = -EINVAL;
		goto fail;
	}
	ret = 0;
fail:
	return ret;
}


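/*
 * The function above implements the DPCD payload table programming
 * handshake: write the {VC id, start slot, slot count} triple to the
 * DP_PAYLOAD_ALLOCATE_SET registers, then poll
 * DP_PAYLOAD_TABLE_UPDATE_STATUS until the branch device sets
 * DP_PAYLOAD_TABLE_UPDATED to confirm it has absorbed the write.  Writing
 * VC id 0 with a slot count of 0x3f, as drm_dp_mst_topology_mgr_set_mst()
 * does, is how the whole table gets cleared.
 */
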
/**
 * drm_dp_check_act_status() - Check ACT handled status.
 * @mgr: manager to use
 *
 * Check the payload status bits in the DPCD for ACT handled completion.
 */
int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
{
	u8 status;
	int ret;
	int count = 0;

	do {
		ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);

		if (ret < 0) {
			DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
			goto fail;
		}

		if (status & DP_PAYLOAD_ACT_HANDLED)
			break;
		count++;
		udelay(100);

	} while (count < 30);

	if (!(status & DP_PAYLOAD_ACT_HANDLED)) {
		DRM_DEBUG_KMS("failed to get ACT bit %d after %d retries\n", status, count);
		ret = -EINVAL;
		goto fail;
	}
	return 0;
fail:
	return ret;
}
EXPORT_SYMBOL(drm_dp_check_act_status);

/**
 * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
 * @clock: dot clock for the mode
 * @bpp: bpp for the mode.
 *
 * This uses the formula in the spec to calculate the PBN value for a mode.
 */
int drm_dp_calc_pbn_mode(int clock, int bpp)
{
	u64 kbps;
	s64 peak_kbps;
	u32 numerator;
	u32 denominator;

	kbps = clock * bpp;

	/*
	 * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
	 * The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on
	 * common multiplier to render an integer PBN for all link rate/lane
	 * counts combinations
	 * calculate
	 * peak_kbps *= (1006/1000)
	 * peak_kbps *= (64/54)
	 * peak_kbps *= 8    convert to bytes
	 */

	numerator = 64 * 1006;
	denominator = 54 * 8 * 1000 * 1000;

	kbps *= numerator;
	peak_kbps = drm_fixp_from_fraction(kbps, denominator);

	return drm_fixp2int_ceil(peak_kbps);
}
EXPORT_SYMBOL(drm_dp_calc_pbn_mode);

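/*
 * A worked example of the fixed-point math above: for a 1080p@60 mode
 * (clock = 148500 kHz) at 24 bpp, kbps = 148500 * 24 = 3564000, and
 * 3564000 * 64 * 1006 / (54 * 8 * 1000 * 1000) = 531.17, which rounds up
 * to 532 PBN.  On a 4-lane HBR2 link (pbn_div = 40) that works out to
 * DIV_ROUND_UP(532, 40) = 14 timeslots.
 */
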
2659
static int test_calc_pbn_mode(void)
{
	int ret;
	ret = drm_dp_calc_pbn_mode(154000, 30);
	if (ret != 689) {
		DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
				154000, 30, 689, ret);
		return -EINVAL;
	}
	ret = drm_dp_calc_pbn_mode(234000, 30);
	if (ret != 1047) {
		DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
				234000, 30, 1047, ret);
		return -EINVAL;
	}
	ret = drm_dp_calc_pbn_mode(297000, 24);
	if (ret != 1063) {
		DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
				297000, 24, 1063, ret);
		return -EINVAL;
	}
	return 0;
}

/* we want to kick the TX after we've ACKed the up/down IRQs. */
static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr)
{
//   queue_work(system_long_wq, &mgr->tx_work);
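	/* NOTE: the workqueue kick is stubbed out in this KolibriOS port. */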
}

static void drm_dp_mst_dump_mstb(struct seq_file *m,
				 struct drm_dp_mst_branch *mstb)
{
	struct drm_dp_mst_port *port;
	int tabs = mstb->lct;
	char prefix[10];
	int i;

	/* clamp so a deep topology can't overrun the prefix buffer */
	if (tabs >= (int)sizeof(prefix))
		tabs = sizeof(prefix) - 1;
	for (i = 0; i < tabs; i++)
		prefix[i] = '\t';
	prefix[i] = '\0';

//   seq_printf(m, "%smst: %p, %d\n", prefix, mstb, mstb->num_ports);
//   list_for_each_entry(port, &mstb->ports, next) {
//       seq_printf(m, "%sport: %d: ddps: %d ldps: %d, %p, conn: %p\n", prefix, port->port_num, port->ddps, port->ldps, port, port->connector);
//       if (port->mstb)
//           drm_dp_mst_dump_mstb(m, port->mstb);
//   }
}

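/*
 * Read back the whole 64-byte DPCD payload table in four 16-byte chunks,
 * starting at DP_PAYLOAD_TABLE_UPDATE_STATUS; returns true only if all
 * four reads complete.
 */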
static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
				  char *buf)
{
	int ret;
	int i;
	for (i = 0; i < 4; i++) {
		ret = drm_dp_dpcd_read(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS + (i * 16), &buf[i * 16], 16);
		if (ret != 16)
			break;
	}
	if (i == 4)
		return true;
	return false;
}

/**
 * drm_dp_mst_dump_topology(): dump topology to seq file.
 * @m: seq_file to dump output to
 * @mgr: manager to dump current topology for.
 *
 * Helper to dump the MST topology to a seq file for debugfs.
 */
void drm_dp_mst_dump_topology(struct seq_file *m,
			      struct drm_dp_mst_topology_mgr *mgr)
{
	int i;
	struct drm_dp_mst_port *port;
	mutex_lock(&mgr->lock);
	if (mgr->mst_primary)
		drm_dp_mst_dump_mstb(m, mgr->mst_primary);

	/* dump VCPIs */
	mutex_unlock(&mgr->lock);
}
EXPORT_SYMBOL(drm_dp_mst_dump_topology);

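/* Work item: if a down request is still in progress, push the next
 * queued sideband message out under the qlock. */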
static void drm_dp_tx_work(struct work_struct *work)
{
	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);

	mutex_lock(&mgr->qlock);
	if (mgr->tx_down_in_progress)
		process_single_down_tx_qlock(mgr);
	mutex_unlock(&mgr->qlock);
}

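/* Final kref release for a port: drop the reference it held on its
 * parent branch device, then free the port itself. */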
static void drm_dp_free_mst_port(struct kref *kref)
{
	struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
	kref_put(&port->parent->kref, drm_dp_free_mst_branch_device);
	kfree(port);
}
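
/*
 * Illustrative call (a sketch; the transaction limit, payload count and
 * variable names are driver-specific assumptions, not values mandated
 * here):
 *
 *	ret = drm_dp_mst_topology_mgr_init(&mst_mgr, dev->dev, &dp->aux,
 *					   16, 3, connector->base.id);
 *
 * typically followed by drm_dp_mst_topology_mgr_set_mst() once the sink
 * reports MST capability.
 */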
/**
 * drm_dp_mst_topology_mgr_init() - initialise a topology manager
 * @mgr: manager struct to initialise
 * @dev: device providing this structure - for i2c addition.
 * @aux: DP helper aux channel to talk to this device
 * @max_dpcd_transaction_bytes: hw specific DPCD transaction limit
 * @max_payloads: maximum number of payloads this GPU can source
 * @conn_base_id: the connector object ID the MST device is connected to.
 *
 * Returns 0 on success, or a negative error code on failure.
 */
int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
				 struct device *dev, struct drm_dp_aux *aux,
				 int max_dpcd_transaction_bytes,
				 int max_payloads, int conn_base_id)
{
	mutex_init(&mgr->lock);
	mutex_init(&mgr->qlock);
	mutex_init(&mgr->payload_lock);
	mutex_init(&mgr->destroy_connector_lock);
	INIT_LIST_HEAD(&mgr->tx_msg_downq);
	INIT_LIST_HEAD(&mgr->destroy_connector_list);
	INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
	INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
	init_waitqueue_head(&mgr->tx_waitq);
	mgr->dev = dev;
	mgr->aux = aux;
	mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
	mgr->max_payloads = max_payloads;
	mgr->conn_base_id = conn_base_id;
	mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL);
	if (!mgr->payloads)
		return -ENOMEM;
	mgr->proposed_vcpis = kcalloc(max_payloads, sizeof(struct drm_dp_vcpi *), GFP_KERNEL);
	if (!mgr->proposed_vcpis)
		return -ENOMEM;
	set_bit(0, &mgr->payload_mask);
	test_calc_pbn_mode();
	return 0;
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);

/**
 * drm_dp_mst_topology_mgr_destroy() - destroy topology manager.
 * @mgr: manager to destroy
 */
void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
{
	mutex_lock(&mgr->payload_lock);
	kfree(mgr->payloads);
	mgr->payloads = NULL;
	kfree(mgr->proposed_vcpis);
	mgr->proposed_vcpis = NULL;
	mutex_unlock(&mgr->payload_lock);
	mgr->dev = NULL;
	mgr->aux = NULL;
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);

/* I2C device */
static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
			       int num)
{
	struct drm_dp_aux *aux = adapter->algo_data;
	struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port, aux);
	struct drm_dp_mst_branch *mstb;
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
	unsigned int i;
	bool reading = false;
	struct drm_dp_sideband_msg_req_body msg;
	struct drm_dp_sideband_msg_tx *txmsg = NULL;
	int ret;

	mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
	if (!mstb)
		return -EREMOTEIO;

	/* construct i2c msg */
	/* see if last msg is a read */
	if (msgs[num - 1].flags & I2C_M_RD)
		reading = true;

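	/*
	 * Only transfers that end in a single read map onto a
	 * DP_REMOTE_I2C_READ sideband request (e.g. the classic
	 * "write register offset, then read" EDID access pattern);
	 * everything else is rejected below.
	 */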
	if (!reading || (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)) {
		DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n");
		ret = -EIO;
		goto out;
	}

	memset(&msg, 0, sizeof(msg));
	msg.req_type = DP_REMOTE_I2C_READ;
	msg.u.i2c_read.num_transactions = num - 1;
	msg.u.i2c_read.port_number = port->port_num;
	for (i = 0; i < num - 1; i++) {
		msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr;
		msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len;
		msg.u.i2c_read.transactions[i].bytes = msgs[i].buf;
	}
	msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr;
	msg.u.i2c_read.num_bytes_read = msgs[num - 1].len;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg) {
		ret = -ENOMEM;
		goto out;
	}

	txmsg->dst = mstb;
	drm_dp_encode_sideband_req(&msg, txmsg);

	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
		if (txmsg->reply.reply_type == 1) { /* got a NAK back */
			ret = -EREMOTEIO;
			goto out;
		}
		if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) {
			ret = -EIO;
			goto out;
		}
		memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes, msgs[num - 1].len);
		ret = num;
	}
out:
	kfree(txmsg);
	drm_dp_put_mst_branch_device(mstb);
	return ret;
}

static u32 drm_dp_mst_i2c_functionality(struct i2c_adapter *adapter)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
	       I2C_FUNC_SMBUS_READ_BLOCK_DATA |
	       I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
	       I2C_FUNC_10BIT_ADDR;
}

static const struct i2c_algorithm drm_dp_mst_i2c_algo = {
	.functionality = drm_dp_mst_i2c_functionality,
	.master_xfer = drm_dp_mst_i2c_xfer,
};

/**
 * drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX
 * @aux: DisplayPort AUX channel
 *
 * Returns 0 on success or a negative error code on failure.
 */
static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux)
{
	aux->ddc.algo = &drm_dp_mst_i2c_algo;
	aux->ddc.algo_data = aux;
	aux->ddc.retries = 3;

	aux->ddc.class = I2C_CLASS_DDC;
	aux->ddc.owner = THIS_MODULE;
	aux->ddc.dev.parent = aux->dev;

	return i2c_add_adapter(&aux->ddc);
}

/**
 * drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter
 * @aux: DisplayPort AUX channel
 */
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux)
{
	i2c_del_adapter(&aux->ddc);
}