/*
 * Copyright © 2014 Red Hat
 *
 * Permission to use, copy, modify, distribute, and sell this software and its
 * documentation for any purpose is hereby granted without fee, provided that
 * the above copyright notice appear in all copies and that both that copyright
 * notice and this permission notice appear in supporting documentation, and
 * that the name of the copyright holders not be used in advertising or
 * publicity pertaining to distribution of the software without specific,
 * written prior permission.  The copyright holders make no representations
 * about the suitability of this software for any purpose.  It is provided "as
 * is" without express or implied warranty.
 *
 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
 * OF THIS SOFTWARE.
 */

#include 
#include 
#include 
#include 
#include 
#include 
#include 
#include 
#include 
#include 
#include 
#include 

#include 

u64 get_jiffies_64(void)
{
    return jiffies;
}
/**
 * DOC: dp mst helper
 *
 * These functions contain parts of the DisplayPort 1.2a MultiStream Transport
 * protocol. The helpers contain a topology manager and bandwidth manager.
 * The helpers encapsulate the sending and receiving of sideband messages.
 */
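/*
 * Minimal sketch of how a driver wires up the topology manager (for
 * illustration only; the init signature and callback names are assumptions
 * based on the drm_dp_mst_helper API of this era, and the example_* hooks
 * and example_connector structure are hypothetical driver pieces).
 */
#if 0
static const struct drm_dp_mst_topology_cbs example_mst_cbs = {
	.add_connector = example_add_connector,
	.destroy_connector = example_destroy_connector,
	.hotplug = example_hotplug,
};

static int example_mst_setup(struct example_connector *conn)
{
	conn->mst_mgr.cbs = &example_mst_cbs;
	/* aux channel, max DPCD transfer size, max payloads, base connector id */
	return drm_dp_mst_topology_mgr_init(&conn->mst_mgr, conn->drm_dev->dev,
					    &conn->dp_aux, 16, 6,
					    conn->base.base.id);
}

/* and when an MST-capable sink is detected (or unplugged) on the port: */
/*	drm_dp_mst_topology_mgr_set_mst(&conn->mst_mgr, true);  */
#endif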
static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
				  char *buf);
static int test_calc_pbn_mode(void);

static void drm_dp_put_port(struct drm_dp_mst_port *port);

static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
				     int id,
				     struct drm_dp_payload *payload);

static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
				  struct drm_dp_mst_port *port,
				  int offset, int size, u8 *bytes);

static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
				    struct drm_dp_mst_branch *mstb);
static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
					   struct drm_dp_mst_branch *mstb,
					   struct drm_dp_mst_port *port);
static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
				 u8 *guid);

static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux);
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux);
static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);
/* sideband msg handling */
static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
76
{
77
	u8 bitmask = 0x80;
78
	u8 bitshift = 7;
79
	u8 array_index = 0;
80
	int number_of_bits = num_nibbles * 4;
81
	u8 remainder = 0;
82
 
83
	while (number_of_bits != 0) {
84
		number_of_bits--;
85
		remainder <<= 1;
86
		remainder |= (data[array_index] & bitmask) >> bitshift;
87
		bitmask >>= 1;
88
		bitshift--;
89
		if (bitmask == 0) {
90
			bitmask = 0x80;
91
			bitshift = 7;
92
			array_index++;
93
		}
94
		if ((remainder & 0x10) == 0x10)
95
			remainder ^= 0x13;
96
	}
97
 
98
	number_of_bits = 4;
99
	while (number_of_bits != 0) {
100
		number_of_bits--;
101
		remainder <<= 1;
102
		if ((remainder & 0x10) != 0)
103
			remainder ^= 0x13;
104
	}
105
 
106
	return remainder;
107
}
108
 
109
static u8 drm_dp_msg_data_crc4(const uint8_t *data, u8 number_of_bytes)
110
{
111
	u8 bitmask = 0x80;
112
	u8 bitshift = 7;
113
	u8 array_index = 0;
114
	int number_of_bits = number_of_bytes * 8;
115
	u16 remainder = 0;
116
 
117
	while (number_of_bits != 0) {
118
		number_of_bits--;
119
		remainder <<= 1;
120
		remainder |= (data[array_index] & bitmask) >> bitshift;
121
		bitmask >>= 1;
122
		bitshift--;
123
		if (bitmask == 0) {
124
			bitmask = 0x80;
125
			bitshift = 7;
126
			array_index++;
127
		}
128
		if ((remainder & 0x100) == 0x100)
129
			remainder ^= 0xd5;
130
	}
131
 
132
	number_of_bits = 8;
133
	while (number_of_bits != 0) {
134
		number_of_bits--;
135
		remainder <<= 1;
136
		if ((remainder & 0x100) != 0)
137
			remainder ^= 0xd5;
138
	}
139
 
140
	return remainder & 0xff;
141
}
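/*
 * Worked example of the two CRCs above (illustrative, not built).
 * drm_dp_msg_header_crc4() is a bitwise CRC-4 over header nibbles with
 * generator x^4 + x + 1 (the 0x13 feedback constant); its result fills the
 * low nibble of the last header byte.  drm_dp_msg_data_crc4() is a CRC-8
 * over whole bytes with generator x^8 + x^7 + x^6 + x^4 + x^2 + 1 (the 0xd5
 * constant); its result is appended as one byte after each body chunk.
 * The buffer contents here are hypothetical.
 */
#if 0
static void example_sideband_crcs(void)
{
	u8 hdr[3] = { 0x10, 0x0c, 0xc0 };	/* 3-byte header, LCT 1 */
	u8 body[2] = { 0x01, 0x02 };		/* 2-byte body chunk */
	u8 hcrc, bcrc;

	/* header CRC covers every nibble except the final CRC nibble */
	hcrc = drm_dp_msg_header_crc4(hdr, (3 * 2) - 1);
	hdr[2] |= hcrc & 0xf;

	/* body CRC goes in the byte immediately after the chunk */
	bcrc = drm_dp_msg_data_crc4(body, 2);
	(void)bcrc;
}
#endif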
142
static inline u8 drm_dp_calc_sb_hdr_size(struct drm_dp_sideband_msg_hdr *hdr)
143
{
144
	u8 size = 3;
145
	size += (hdr->lct / 2);
146
	return size;
147
}
148
 
149
static void drm_dp_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
150
					   u8 *buf, int *len)
151
{
152
	int idx = 0;
153
	int i;
154
	u8 crc4;
155
	buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf);
156
	for (i = 0; i < (hdr->lct / 2); i++)
157
		buf[idx++] = hdr->rad[i];
158
	buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) |
159
		(hdr->msg_len & 0x3f);
160
	buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4);
161
 
162
	crc4 = drm_dp_msg_header_crc4(buf, (idx * 2) - 1);
163
	buf[idx - 1] |= (crc4 & 0xf);
164
 
165
	*len = idx;
166
}
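/*
 * Illustrative example (not built): encoding a header for a directly
 * attached branch device (LCT 1, so no RAD bytes) carrying a 12-byte body
 * with sequence number 0.  The field packing follows the encoder above;
 * the concrete values are hypothetical.
 */
#if 0
static void example_encode_hdr(void)
{
	struct drm_dp_sideband_msg_hdr hdr = {
		.lct = 1, .lcr = 0,
		.broadcast = 0, .path_msg = 0,
		.msg_len = 12, .somt = 1, .eomt = 1, .seqno = 0,
	};
	u8 buf[8];
	int len;

	drm_dp_encode_sideband_msg_hdr(&hdr, buf, &len);
	/*
	 * len == 3:
	 *   buf[0] == 0x10        LCT in the high nibble, LCR in the low
	 *   buf[1] == 0x0c        broadcast/path_msg clear, msg_len 12
	 *   buf[2] == 0xc0 | crc  SOMT and EOMT set, seqno 0, CRC-4 nibble
	 */
}
#endif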
167
 
168
static bool drm_dp_decode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
169
					   u8 *buf, int buflen, u8 *hdrlen)
170
{
171
	u8 crc4;
172
	u8 len;
173
	int i;
174
	u8 idx;
175
	if (buf[0] == 0)
176
		return false;
177
	len = 3;
178
	len += ((buf[0] & 0xf0) >> 4) / 2;
179
	if (len > buflen)
180
		return false;
181
	crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1);
182
 
183
	if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) {
184
		DRM_DEBUG_KMS("crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]);
185
		return false;
186
	}
187
 
188
	hdr->lct = (buf[0] & 0xf0) >> 4;
189
	hdr->lcr = (buf[0] & 0xf);
190
	idx = 1;
191
	for (i = 0; i < (hdr->lct / 2); i++)
192
		hdr->rad[i] = buf[idx++];
193
	hdr->broadcast = (buf[idx] >> 7) & 0x1;
194
	hdr->path_msg = (buf[idx] >> 6) & 0x1;
195
	hdr->msg_len = buf[idx] & 0x3f;
196
	idx++;
197
	hdr->somt = (buf[idx] >> 7) & 0x1;
198
	hdr->eomt = (buf[idx] >> 6) & 0x1;
199
	hdr->seqno = (buf[idx] >> 4) & 0x1;
200
	idx++;
201
	*hdrlen = idx;
202
	return true;
203
}
204
 
205
static void drm_dp_encode_sideband_req(struct drm_dp_sideband_msg_req_body *req,
206
				       struct drm_dp_sideband_msg_tx *raw)
207
{
208
	int idx = 0;
209
	int i;
210
	u8 *buf = raw->msg;
211
	buf[idx++] = req->req_type & 0x7f;
212
 
213
	switch (req->req_type) {
214
	case DP_ENUM_PATH_RESOURCES:
215
		buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
216
		idx++;
217
		break;
218
	case DP_ALLOCATE_PAYLOAD:
219
		buf[idx] = (req->u.allocate_payload.port_number & 0xf) << 4 |
220
			(req->u.allocate_payload.number_sdp_streams & 0xf);
221
		idx++;
222
		buf[idx] = (req->u.allocate_payload.vcpi & 0x7f);
223
		idx++;
224
		buf[idx] = (req->u.allocate_payload.pbn >> 8);
225
		idx++;
226
		buf[idx] = (req->u.allocate_payload.pbn & 0xff);
227
		idx++;
228
		for (i = 0; i < req->u.allocate_payload.number_sdp_streams / 2; i++) {
229
			buf[idx] = ((req->u.allocate_payload.sdp_stream_sink[i * 2] & 0xf) << 4) |
230
				(req->u.allocate_payload.sdp_stream_sink[i * 2 + 1] & 0xf);
231
			idx++;
232
		}
233
		if (req->u.allocate_payload.number_sdp_streams & 1) {
234
			i = req->u.allocate_payload.number_sdp_streams - 1;
235
			buf[idx] = (req->u.allocate_payload.sdp_stream_sink[i] & 0xf) << 4;
236
			idx++;
237
		}
238
		break;
239
	case DP_QUERY_PAYLOAD:
240
		buf[idx] = (req->u.query_payload.port_number & 0xf) << 4;
241
		idx++;
242
		buf[idx] = (req->u.query_payload.vcpi & 0x7f);
243
		idx++;
244
		break;
245
	case DP_REMOTE_DPCD_READ:
246
		buf[idx] = (req->u.dpcd_read.port_number & 0xf) << 4;
247
		buf[idx] |= ((req->u.dpcd_read.dpcd_address & 0xf0000) >> 16) & 0xf;
248
		idx++;
249
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff00) >> 8;
250
		idx++;
251
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff);
252
		idx++;
253
		buf[idx] = (req->u.dpcd_read.num_bytes);
254
		idx++;
255
		break;
256
 
257
	case DP_REMOTE_DPCD_WRITE:
258
		buf[idx] = (req->u.dpcd_write.port_number & 0xf) << 4;
259
		buf[idx] |= ((req->u.dpcd_write.dpcd_address & 0xf0000) >> 16) & 0xf;
260
		idx++;
261
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff00) >> 8;
262
		idx++;
263
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff);
264
		idx++;
265
		buf[idx] = (req->u.dpcd_write.num_bytes);
266
		idx++;
267
		memcpy(&buf[idx], req->u.dpcd_write.bytes, req->u.dpcd_write.num_bytes);
268
		idx += req->u.dpcd_write.num_bytes;
269
		break;
270
	case DP_REMOTE_I2C_READ:
271
		buf[idx] = (req->u.i2c_read.port_number & 0xf) << 4;
272
		buf[idx] |= (req->u.i2c_read.num_transactions & 0x3);
273
		idx++;
274
		for (i = 0; i < (req->u.i2c_read.num_transactions & 0x3); i++) {
275
			buf[idx] = req->u.i2c_read.transactions[i].i2c_dev_id & 0x7f;
276
			idx++;
277
			buf[idx] = req->u.i2c_read.transactions[i].num_bytes;
278
			idx++;
279
			memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);
280
			idx += req->u.i2c_read.transactions[i].num_bytes;
281
 
282
			buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 5;
283
			buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);
284
			idx++;
285
		}
286
		buf[idx] = (req->u.i2c_read.read_i2c_device_id) & 0x7f;
287
		idx++;
288
		buf[idx] = (req->u.i2c_read.num_bytes_read);
289
		idx++;
290
		break;
291
 
292
	case DP_REMOTE_I2C_WRITE:
293
		buf[idx] = (req->u.i2c_write.port_number & 0xf) << 4;
294
		idx++;
295
		buf[idx] = (req->u.i2c_write.write_i2c_device_id) & 0x7f;
296
		idx++;
297
		buf[idx] = (req->u.i2c_write.num_bytes);
298
		idx++;
299
		memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
300
		idx += req->u.i2c_write.num_bytes;
301
		break;
302
	}
303
	raw->cur_len = idx;
304
}
305
 
306
static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len)
307
{
308
	u8 crc4;
309
	crc4 = drm_dp_msg_data_crc4(msg, len);
310
	msg[len] = crc4;
311
}
312
 
313
static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *rep,
314
					 struct drm_dp_sideband_msg_tx *raw)
315
{
316
	int idx = 0;
317
	u8 *buf = raw->msg;
318
 
319
	buf[idx++] = (rep->reply_type & 0x1) << 7 | (rep->req_type & 0x7f);
320
 
321
	raw->cur_len = idx;
322
}
323
 
324
/* this adds a chunk of msg to the builder to get the final msg */
325
static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
326
				      u8 *replybuf, u8 replybuflen, bool hdr)
327
{
328
	int ret;
329
	u8 crc4;
330
 
331
	if (hdr) {
332
		u8 hdrlen;
333
		struct drm_dp_sideband_msg_hdr recv_hdr;
334
		ret = drm_dp_decode_sideband_msg_hdr(&recv_hdr, replybuf, replybuflen, &hdrlen);
335
		if (ret == false) {
336
			print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16, 1, replybuf, replybuflen, false);
337
			return false;
338
		}
339
 
340
		/* get length contained in this portion */
341
		msg->curchunk_len = recv_hdr.msg_len;
342
		msg->curchunk_hdrlen = hdrlen;
343
 
344
		/* we have already gotten an somt - don't bother parsing */
345
		if (recv_hdr.somt && msg->have_somt)
346
			return false;
347
 
348
		if (recv_hdr.somt) {
349
			memcpy(&msg->initial_hdr, &recv_hdr, sizeof(struct drm_dp_sideband_msg_hdr));
350
			msg->have_somt = true;
351
		}
352
		if (recv_hdr.eomt)
353
			msg->have_eomt = true;
354
 
355
		/* copy the bytes for the remainder of this header chunk */
356
		msg->curchunk_idx = min(msg->curchunk_len, (u8)(replybuflen - hdrlen));
357
		memcpy(&msg->chunk[0], replybuf + hdrlen, msg->curchunk_idx);
358
	} else {
359
		memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
360
		msg->curchunk_idx += replybuflen;
361
	}
362
 
363
	if (msg->curchunk_idx >= msg->curchunk_len) {
364
		/* do CRC */
365
		crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);
366
		/* copy chunk into bigger msg */
367
		memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1);
368
		msg->curlen += msg->curchunk_len - 1;
369
	}
370
	return true;
371
}
372
 
373
static bool drm_dp_sideband_parse_link_address(struct drm_dp_sideband_msg_rx *raw,
374
					       struct drm_dp_sideband_msg_reply_body *repmsg)
375
{
376
	int idx = 1;
377
	int i;
378
	memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16);
379
	idx += 16;
380
	repmsg->u.link_addr.nports = raw->msg[idx] & 0xf;
381
	idx++;
382
	if (idx > raw->curlen)
383
		goto fail_len;
384
	for (i = 0; i < repmsg->u.link_addr.nports; i++) {
385
		if (raw->msg[idx] & 0x80)
386
			repmsg->u.link_addr.ports[i].input_port = 1;
387
 
388
		repmsg->u.link_addr.ports[i].peer_device_type = (raw->msg[idx] >> 4) & 0x7;
389
		repmsg->u.link_addr.ports[i].port_number = (raw->msg[idx] & 0xf);
390
 
391
		idx++;
392
		if (idx > raw->curlen)
393
			goto fail_len;
394
		repmsg->u.link_addr.ports[i].mcs = (raw->msg[idx] >> 7) & 0x1;
395
		repmsg->u.link_addr.ports[i].ddps = (raw->msg[idx] >> 6) & 0x1;
396
		if (repmsg->u.link_addr.ports[i].input_port == 0)
397
			repmsg->u.link_addr.ports[i].legacy_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
398
		idx++;
399
		if (idx > raw->curlen)
400
			goto fail_len;
401
		if (repmsg->u.link_addr.ports[i].input_port == 0) {
402
			repmsg->u.link_addr.ports[i].dpcd_revision = (raw->msg[idx]);
403
			idx++;
404
			if (idx > raw->curlen)
405
				goto fail_len;
406
			memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16);
407
			idx += 16;
408
			if (idx > raw->curlen)
409
				goto fail_len;
410
			repmsg->u.link_addr.ports[i].num_sdp_streams = (raw->msg[idx] >> 4) & 0xf;
411
			repmsg->u.link_addr.ports[i].num_sdp_stream_sinks = (raw->msg[idx] & 0xf);
412
			idx++;
413
 
414
		}
415
		if (idx > raw->curlen)
416
			goto fail_len;
417
	}
418
 
419
	return true;
420
fail_len:
421
	DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
422
	return false;
423
}
424
 
425
static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx *raw,
426
						   struct drm_dp_sideband_msg_reply_body *repmsg)
427
{
428
	int idx = 1;
429
	repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf;
430
	idx++;
431
	if (idx > raw->curlen)
432
		goto fail_len;
433
	repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx];
434
	if (idx > raw->curlen)
435
		goto fail_len;
436
 
437
	memcpy(repmsg->u.remote_dpcd_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_dpcd_read_ack.num_bytes);
438
	return true;
439
fail_len:
440
	DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
441
	return false;
442
}
443
 
444
static bool drm_dp_sideband_parse_remote_dpcd_write(struct drm_dp_sideband_msg_rx *raw,
445
						      struct drm_dp_sideband_msg_reply_body *repmsg)
446
{
447
	int idx = 1;
448
	repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf;
449
	idx++;
450
	if (idx > raw->curlen)
451
		goto fail_len;
452
	return true;
453
fail_len:
454
	DRM_DEBUG_KMS("parse length fail %d %d\n", idx, raw->curlen);
455
	return false;
456
}
457
 
458
static bool drm_dp_sideband_parse_remote_i2c_read_ack(struct drm_dp_sideband_msg_rx *raw,
459
						      struct drm_dp_sideband_msg_reply_body *repmsg)
460
{
461
	int idx = 1;
462
 
463
	repmsg->u.remote_i2c_read_ack.port_number = (raw->msg[idx] & 0xf);
464
	idx++;
465
	if (idx > raw->curlen)
466
		goto fail_len;
467
	repmsg->u.remote_i2c_read_ack.num_bytes = raw->msg[idx];
468
	idx++;
469
	/* TODO check */
470
	memcpy(repmsg->u.remote_i2c_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_i2c_read_ack.num_bytes);
471
	return true;
472
fail_len:
473
	DRM_DEBUG_KMS("remote i2c reply parse length fail %d %d\n", idx, raw->curlen);
474
	return false;
475
}
476
 
477
static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband_msg_rx *raw,
478
							  struct drm_dp_sideband_msg_reply_body *repmsg)
479
{
480
	int idx = 1;
481
	repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf;
482
	idx++;
483
	if (idx > raw->curlen)
484
		goto fail_len;
485
	repmsg->u.path_resources.full_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
486
	idx += 2;
487
	if (idx > raw->curlen)
488
		goto fail_len;
489
	repmsg->u.path_resources.avail_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
490
	idx += 2;
491
	if (idx > raw->curlen)
492
		goto fail_len;
493
	return true;
494
fail_len:
495
	DRM_DEBUG_KMS("enum resource parse length fail %d %d\n", idx, raw->curlen);
496
	return false;
497
}
498
 
499
static bool drm_dp_sideband_parse_allocate_payload_ack(struct drm_dp_sideband_msg_rx *raw,
500
							  struct drm_dp_sideband_msg_reply_body *repmsg)
501
{
502
	int idx = 1;
503
	repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
504
	idx++;
505
	if (idx > raw->curlen)
506
		goto fail_len;
507
	repmsg->u.allocate_payload.vcpi = raw->msg[idx];
508
	idx++;
509
	if (idx > raw->curlen)
510
		goto fail_len;
511
	repmsg->u.allocate_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
512
	idx += 2;
513
	if (idx > raw->curlen)
514
		goto fail_len;
515
	return true;
516
fail_len:
517
	DRM_DEBUG_KMS("allocate payload parse length fail %d %d\n", idx, raw->curlen);
518
	return false;
519
}
520
 
521
static bool drm_dp_sideband_parse_query_payload_ack(struct drm_dp_sideband_msg_rx *raw,
522
						    struct drm_dp_sideband_msg_reply_body *repmsg)
523
{
524
	int idx = 1;
525
	repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
526
	idx++;
527
	if (idx > raw->curlen)
528
		goto fail_len;
529
	repmsg->u.query_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
530
	idx += 2;
531
	if (idx > raw->curlen)
532
		goto fail_len;
533
	return true;
534
fail_len:
535
	DRM_DEBUG_KMS("query payload parse length fail %d %d\n", idx, raw->curlen);
536
	return false;
537
}
538
 
539
static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
540
					struct drm_dp_sideband_msg_reply_body *msg)
541
{
542
	memset(msg, 0, sizeof(*msg));
543
	msg->reply_type = (raw->msg[0] & 0x80) >> 7;
544
	msg->req_type = (raw->msg[0] & 0x7f);
545
 
546
	if (msg->reply_type) {
547
		memcpy(msg->u.nak.guid, &raw->msg[1], 16);
548
		msg->u.nak.reason = raw->msg[17];
549
		msg->u.nak.nak_data = raw->msg[18];
550
		return false;
551
	}
552
 
553
	switch (msg->req_type) {
554
	case DP_LINK_ADDRESS:
555
		return drm_dp_sideband_parse_link_address(raw, msg);
556
	case DP_QUERY_PAYLOAD:
557
		return drm_dp_sideband_parse_query_payload_ack(raw, msg);
558
	case DP_REMOTE_DPCD_READ:
559
		return drm_dp_sideband_parse_remote_dpcd_read(raw, msg);
560
	case DP_REMOTE_DPCD_WRITE:
561
		return drm_dp_sideband_parse_remote_dpcd_write(raw, msg);
562
	case DP_REMOTE_I2C_READ:
563
		return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg);
564
	case DP_ENUM_PATH_RESOURCES:
565
		return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);
566
	case DP_ALLOCATE_PAYLOAD:
567
		return drm_dp_sideband_parse_allocate_payload_ack(raw, msg);
568
	default:
569
		DRM_ERROR("Got unknown reply 0x%02x\n", msg->req_type);
570
		return false;
571
	}
572
}
573
 
574
static bool drm_dp_sideband_parse_connection_status_notify(struct drm_dp_sideband_msg_rx *raw,
575
							   struct drm_dp_sideband_msg_req_body *msg)
576
{
577
	int idx = 1;
578
 
579
	msg->u.conn_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
580
	idx++;
581
	if (idx > raw->curlen)
582
		goto fail_len;
583
 
584
	memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16);
585
	idx += 16;
586
	if (idx > raw->curlen)
587
		goto fail_len;
588
 
589
	msg->u.conn_stat.legacy_device_plug_status = (raw->msg[idx] >> 6) & 0x1;
590
	msg->u.conn_stat.displayport_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
591
	msg->u.conn_stat.message_capability_status = (raw->msg[idx] >> 4) & 0x1;
592
	msg->u.conn_stat.input_port = (raw->msg[idx] >> 3) & 0x1;
593
	msg->u.conn_stat.peer_device_type = (raw->msg[idx] & 0x7);
594
	idx++;
595
	return true;
596
fail_len:
597
	DRM_DEBUG_KMS("connection status reply parse length fail %d %d\n", idx, raw->curlen);
598
	return false;
599
}
600
 
601
static bool drm_dp_sideband_parse_resource_status_notify(struct drm_dp_sideband_msg_rx *raw,
602
							   struct drm_dp_sideband_msg_req_body *msg)
603
{
604
	int idx = 1;
605
 
606
	msg->u.resource_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
607
	idx++;
608
	if (idx > raw->curlen)
609
		goto fail_len;
610
 
611
	memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16);
612
	idx += 16;
613
	if (idx > raw->curlen)
614
		goto fail_len;
615
 
616
	msg->u.resource_stat.available_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
617
	idx++;
618
	return true;
619
fail_len:
620
	DRM_DEBUG_KMS("resource status reply parse length fail %d %d\n", idx, raw->curlen);
621
	return false;
622
}
623
 
624
static bool drm_dp_sideband_parse_req(struct drm_dp_sideband_msg_rx *raw,
625
				      struct drm_dp_sideband_msg_req_body *msg)
626
{
627
	memset(msg, 0, sizeof(*msg));
628
	msg->req_type = (raw->msg[0] & 0x7f);
629
 
630
	switch (msg->req_type) {
631
	case DP_CONNECTION_STATUS_NOTIFY:
632
		return drm_dp_sideband_parse_connection_status_notify(raw, msg);
633
	case DP_RESOURCE_STATUS_NOTIFY:
634
		return drm_dp_sideband_parse_resource_status_notify(raw, msg);
635
	default:
636
		DRM_ERROR("Got unknown request 0x%02x\n", msg->req_type);
637
		return false;
638
	}
639
}
640
 
641
static int build_dpcd_write(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
642
{
643
	struct drm_dp_sideband_msg_req_body req;
644
 
645
	req.req_type = DP_REMOTE_DPCD_WRITE;
646
	req.u.dpcd_write.port_number = port_num;
647
	req.u.dpcd_write.dpcd_address = offset;
648
	req.u.dpcd_write.num_bytes = num_bytes;
649
	req.u.dpcd_write.bytes = bytes;
650
	drm_dp_encode_sideband_req(&req, msg);
651
 
652
	return 0;
653
}
654
 
655
static int build_link_address(struct drm_dp_sideband_msg_tx *msg)
656
{
657
	struct drm_dp_sideband_msg_req_body req;
658
 
659
	req.req_type = DP_LINK_ADDRESS;
660
	drm_dp_encode_sideband_req(&req, msg);
661
	return 0;
662
}
663
 
664
static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int port_num)
665
{
666
	struct drm_dp_sideband_msg_req_body req;
667
 
668
	req.req_type = DP_ENUM_PATH_RESOURCES;
669
	req.u.port_num.port_number = port_num;
670
	drm_dp_encode_sideband_req(&req, msg);
671
	msg->path_msg = true;
672
	return 0;
673
}
674
 
675
static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_num,
676
				  u8 vcpi, uint16_t pbn)
677
{
678
	struct drm_dp_sideband_msg_req_body req;
679
	memset(&req, 0, sizeof(req));
680
	req.req_type = DP_ALLOCATE_PAYLOAD;
681
	req.u.allocate_payload.port_number = port_num;
682
	req.u.allocate_payload.vcpi = vcpi;
683
	req.u.allocate_payload.pbn = pbn;
684
	drm_dp_encode_sideband_req(&req, msg);
685
	msg->path_msg = true;
686
	return 0;
687
}
688
 
689
static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
690
					struct drm_dp_vcpi *vcpi)
691
{
5271 serge 692
	int ret, vcpi_ret;
5060 serge 693
 
694
	mutex_lock(&mgr->payload_lock);
695
	ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1);
696
	if (ret > mgr->max_payloads) {
697
		ret = -EINVAL;
698
		DRM_DEBUG_KMS("out of payload ids %d\n", ret);
699
		goto out_unlock;
700
	}
701
 
5271 serge 702
	vcpi_ret = find_first_zero_bit(&mgr->vcpi_mask, mgr->max_payloads + 1);
703
	if (vcpi_ret > mgr->max_payloads) {
704
		ret = -EINVAL;
705
		DRM_DEBUG_KMS("out of vcpi ids %d\n", ret);
706
		goto out_unlock;
707
	}
708
 
5060 serge 709
	set_bit(ret, &mgr->payload_mask);
5271 serge 710
	set_bit(vcpi_ret, &mgr->vcpi_mask);
711
	vcpi->vcpi = vcpi_ret + 1;
5060 serge 712
	mgr->proposed_vcpis[ret - 1] = vcpi;
713
out_unlock:
714
	mutex_unlock(&mgr->payload_lock);
715
	return ret;
716
}
717
 
718
static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr,
5271 serge 719
				      int vcpi)
5060 serge 720
{
5271 serge 721
	int i;
722
	if (vcpi == 0)
5060 serge 723
		return;
724
 
725
	mutex_lock(&mgr->payload_lock);
5271 serge 726
	DRM_DEBUG_KMS("putting payload %d\n", vcpi);
727
	clear_bit(vcpi - 1, &mgr->vcpi_mask);
728
 
729
	for (i = 0; i < mgr->max_payloads; i++) {
730
		if (mgr->proposed_vcpis[i])
731
			if (mgr->proposed_vcpis[i]->vcpi == vcpi) {
732
				mgr->proposed_vcpis[i] = NULL;
733
				clear_bit(i + 1, &mgr->payload_mask);
734
			}
735
	}
5060 serge 736
	mutex_unlock(&mgr->payload_lock);
737
}
738
 
739
static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
740
			      struct drm_dp_sideband_msg_tx *txmsg)
741
{
742
	bool ret;
743
	mutex_lock(&mgr->qlock);
744
	ret = (txmsg->state == DRM_DP_SIDEBAND_TX_RX ||
745
	       txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT);
746
	mutex_unlock(&mgr->qlock);
747
	return ret;
748
}
749
 
750
static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
751
				    struct drm_dp_sideband_msg_tx *txmsg)
752
{
753
	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
754
	int ret;
755
 
756
	ret = wait_event_timeout(mgr->tx_waitq,
757
				 check_txmsg_state(mgr, txmsg),
758
				 (4 * HZ));
759
	mutex_lock(&mstb->mgr->qlock);
760
	if (ret > 0) {
761
		if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) {
762
			ret = -EIO;
763
			goto out;
764
		}
765
	} else {
766
		DRM_DEBUG_KMS("timedout msg send %p %d %d\n", txmsg, txmsg->state, txmsg->seqno);
767
 
768
		/* dump some state */
769
		ret = -EIO;
770
 
771
		/* remove from q */
772
		if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED ||
773
		    txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND) {
774
			list_del(&txmsg->next);
775
		}
776
 
777
		if (txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
778
		    txmsg->state == DRM_DP_SIDEBAND_TX_SENT) {
779
			mstb->tx_slots[txmsg->seqno] = NULL;
780
		}
781
	}
782
out:
783
	mutex_unlock(&mgr->qlock);
784
 
785
	return ret;
786
}
787
 
788
static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
789
{
790
	struct drm_dp_mst_branch *mstb;
791
 
792
	mstb = kzalloc(sizeof(*mstb), GFP_KERNEL);
793
	if (!mstb)
794
		return NULL;
795
 
796
	mstb->lct = lct;
797
	if (lct > 1)
798
		memcpy(mstb->rad, rad, lct / 2);
799
	INIT_LIST_HEAD(&mstb->ports);
800
	kref_init(&mstb->kref);
801
	return mstb;
802
}
803
 
804
static void drm_dp_destroy_mst_branch_device(struct kref *kref)
805
{
806
	struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
807
	struct drm_dp_mst_port *port, *tmp;
808
	bool wake_tx = false;
809
 
810
	cancel_work_sync(&mstb->mgr->work);
811
 
812
	/*
813
	 * destroy all ports - don't need lock
814
	 * as there are no more references to the mst branch
815
	 * device at this point.
816
	 */
817
	list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
818
		list_del(&port->next);
819
		drm_dp_put_port(port);
820
	}
821
 
822
	/* drop any tx slots msg */
823
	mutex_lock(&mstb->mgr->qlock);
824
	if (mstb->tx_slots[0]) {
825
		mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
826
		mstb->tx_slots[0] = NULL;
827
		wake_tx = true;
828
	}
829
	if (mstb->tx_slots[1]) {
830
		mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
831
		mstb->tx_slots[1] = NULL;
832
		wake_tx = true;
833
	}
834
	mutex_unlock(&mstb->mgr->qlock);
835
 
836
//   if (wake_tx)
837
//       wake_up(&mstb->mgr->tx_waitq);
838
	kfree(mstb);
839
}
840
 
841
static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb)
842
{
843
	kref_put(&mstb->kref, drm_dp_destroy_mst_branch_device);
844
}
845
 
846
 
847
static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt)
848
{
5271 serge 849
	struct drm_dp_mst_branch *mstb;
850
 
5060 serge 851
	switch (old_pdt) {
852
	case DP_PEER_DEVICE_DP_LEGACY_CONV:
853
	case DP_PEER_DEVICE_SST_SINK:
854
		/* remove i2c over sideband */
855
		drm_dp_mst_unregister_i2c_bus(&port->aux);
856
		break;
857
	case DP_PEER_DEVICE_MST_BRANCHING:
5271 serge 858
		mstb = port->mstb;
5060 serge 859
		port->mstb = NULL;
5271 serge 860
		drm_dp_put_mst_branch_device(mstb);
5060 serge 861
		break;
862
	}
863
}
864
 
865
static void drm_dp_destroy_port(struct kref *kref)
866
{
867
	struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
868
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
869
	if (!port->input) {
870
		port->vcpi.num_slots = 0;
5271 serge 871
 
872
		kfree(port->cached_edid);
5060 serge 873
		if (port->connector)
874
			(*port->mgr->cbs->destroy_connector)(mgr, port->connector);
875
		drm_dp_port_teardown_pdt(port, port->pdt);
876
 
877
		if (!port->input && port->vcpi.vcpi > 0)
878
			drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
879
	}
880
	kfree(port);
881
 
882
	(*mgr->cbs->hotplug)(mgr);
883
}
884
 
885
static void drm_dp_put_port(struct drm_dp_mst_port *port)
886
{
887
	kref_put(&port->kref, drm_dp_destroy_port);
888
}
889
 
890
static struct drm_dp_mst_branch *drm_dp_mst_get_validated_mstb_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_branch *to_find)
891
{
892
	struct drm_dp_mst_port *port;
893
	struct drm_dp_mst_branch *rmstb;
894
	if (to_find == mstb) {
895
		kref_get(&mstb->kref);
896
		return mstb;
897
	}
898
	list_for_each_entry(port, &mstb->ports, next) {
899
		if (port->mstb) {
900
			rmstb = drm_dp_mst_get_validated_mstb_ref_locked(port->mstb, to_find);
901
			if (rmstb)
902
				return rmstb;
903
		}
904
	}
905
	return NULL;
906
}
907
 
908
static struct drm_dp_mst_branch *drm_dp_get_validated_mstb_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_branch *mstb)
909
{
910
	struct drm_dp_mst_branch *rmstb = NULL;
911
	mutex_lock(&mgr->lock);
912
	if (mgr->mst_primary)
913
		rmstb = drm_dp_mst_get_validated_mstb_ref_locked(mgr->mst_primary, mstb);
914
	mutex_unlock(&mgr->lock);
915
	return rmstb;
916
}
917
 
918
static struct drm_dp_mst_port *drm_dp_mst_get_port_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_port *to_find)
919
{
920
	struct drm_dp_mst_port *port, *mport;
921
 
922
	list_for_each_entry(port, &mstb->ports, next) {
923
		if (port == to_find) {
924
			kref_get(&port->kref);
925
			return port;
926
		}
927
		if (port->mstb) {
928
			mport = drm_dp_mst_get_port_ref_locked(port->mstb, to_find);
929
			if (mport)
930
				return mport;
931
		}
932
	}
933
	return NULL;
934
}
935
 
936
static struct drm_dp_mst_port *drm_dp_get_validated_port_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
937
{
938
	struct drm_dp_mst_port *rport = NULL;
939
	mutex_lock(&mgr->lock);
940
	if (mgr->mst_primary)
941
		rport = drm_dp_mst_get_port_ref_locked(mgr->mst_primary, port);
942
	mutex_unlock(&mgr->lock);
943
	return rport;
944
}
945
 
946
static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num)
947
{
948
	struct drm_dp_mst_port *port;
949
 
950
	list_for_each_entry(port, &mstb->ports, next) {
951
		if (port->port_num == port_num) {
952
			kref_get(&port->kref);
953
			return port;
954
		}
955
	}
956
 
957
	return NULL;
958
}
959
 
960
/*
961
 * calculate a new RAD for this MST branch device
962
 * if parent has an LCT of 2 then it has 1 nibble of RAD,
963
 * if parent has an LCT of 3 then it has 2 nibbles of RAD,
964
 */
965
static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
966
				 u8 *rad)
967
{
968
	int lct = port->parent->lct;
969
	int shift = 4;
970
	int idx = lct / 2;
971
	if (lct > 1) {
972
		memcpy(rad, port->parent->rad, idx);
973
		shift = (lct % 2) ? 4 : 0;
974
	} else
975
		rad[0] = 0;
976
 
977
	rad[idx] |= port->port_num << shift;
978
	return lct + 1;
979
}
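/*
 * Worked example for drm_dp_calculate_rad() (hypothetical numbers): if the
 * parent branch hangs directly off the source (parent LCT 1) and the new
 * branch device sits on its port 2, then idx = 0 and shift = 4, so rad[0]
 * becomes 0x20 and the new branch gets an LCT of 2.  Each further hop adds
 * one more nibble of relative address.
 */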
980
 
981
/*
 * Returns true if the caller should send a LINK_ADDRESS request to the
 * newly created branch device (i.e. the port turned out to be a branching
 * device).
 */
984
static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port)
985
{
986
	int ret;
987
	u8 rad[6], lct;
988
	bool send_link = false;
989
	switch (port->pdt) {
990
	case DP_PEER_DEVICE_DP_LEGACY_CONV:
991
	case DP_PEER_DEVICE_SST_SINK:
992
		/* add i2c over sideband */
993
		ret = drm_dp_mst_register_i2c_bus(&port->aux);
994
		break;
995
	case DP_PEER_DEVICE_MST_BRANCHING:
996
		lct = drm_dp_calculate_rad(port, rad);
997
 
998
		port->mstb = drm_dp_add_mst_branch_device(lct, rad);
999
		port->mstb->mgr = port->mgr;
1000
		port->mstb->port_parent = port;
1001
 
1002
		send_link = true;
1003
		break;
1004
	}
1005
	return send_link;
1006
}
1007
 
1008
static void drm_dp_check_port_guid(struct drm_dp_mst_branch *mstb,
1009
				   struct drm_dp_mst_port *port)
1010
{
1011
	int ret;
1012
	if (port->dpcd_rev >= 0x12) {
1013
		port->guid_valid = drm_dp_validate_guid(mstb->mgr, port->guid);
1014
		if (!port->guid_valid) {
1015
			ret = drm_dp_send_dpcd_write(mstb->mgr,
1016
						     port,
1017
						     DP_GUID,
1018
						     16, port->guid);
1019
			port->guid_valid = true;
1020
		}
1021
	}
1022
}
1023
 
1024
static void build_mst_prop_path(struct drm_dp_mst_port *port,
1025
				struct drm_dp_mst_branch *mstb,
5271 serge 1026
				char *proppath,
1027
				size_t proppath_size)
5060 serge 1028
{
1029
	int i;
1030
	char temp[8];
5271 serge 1031
	snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
5060 serge 1032
	for (i = 0; i < (mstb->lct - 1); i++) {
1033
		int shift = (i % 2) ? 0 : 4;
1034
		int port_num = mstb->rad[i / 2] >> shift;
5271 serge 1035
		snprintf(temp, sizeof(temp), "-%d", port_num);
1036
		strlcat(proppath, temp, proppath_size);
5060 serge 1037
	}
5271 serge 1038
	snprintf(temp, sizeof(temp), "-%d", port->port_num);
1039
	strlcat(proppath, temp, proppath_size);
5060 serge 1040
}
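/*
 * Illustrative example (hypothetical values): with conn_base_id 10, a
 * port 1 sitting on a second-level branch reached through port 3 of the
 * primary branch gets the property path "mst:10-3-1".  The string is used
 * as the connector path property so userspace can identify MST connectors
 * by their position in the topology.
 */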
1041
 
1042
static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
1043
			    struct device *dev,
1044
			    struct drm_dp_link_addr_reply_port *port_msg)
1045
{
1046
	struct drm_dp_mst_port *port;
1047
	bool ret;
1048
	bool created = false;
1049
	int old_pdt = 0;
1050
	int old_ddps = 0;
1051
	port = drm_dp_get_port(mstb, port_msg->port_number);
1052
	if (!port) {
1053
		port = kzalloc(sizeof(*port), GFP_KERNEL);
1054
		if (!port)
1055
			return;
1056
		kref_init(&port->kref);
1057
		port->parent = mstb;
1058
		port->port_num = port_msg->port_number;
1059
		port->mgr = mstb->mgr;
1060
		port->aux.name = "DPMST";
1061
		port->aux.dev = dev;
1062
		created = true;
1063
	} else {
1064
		old_pdt = port->pdt;
1065
		old_ddps = port->ddps;
1066
	}
1067
 
1068
	port->pdt = port_msg->peer_device_type;
1069
	port->input = port_msg->input_port;
1070
	port->mcs = port_msg->mcs;
1071
	port->ddps = port_msg->ddps;
1072
	port->ldps = port_msg->legacy_device_plug_status;
1073
	port->dpcd_rev = port_msg->dpcd_revision;
1074
	port->num_sdp_streams = port_msg->num_sdp_streams;
1075
	port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;
1076
	memcpy(port->guid, port_msg->peer_guid, 16);
1077
 
1078
	/* manage mstb port lists with mgr lock - take a reference
1079
	   for this list */
1080
	if (created) {
1081
		mutex_lock(&mstb->mgr->lock);
1082
		kref_get(&port->kref);
1083
		list_add(&port->next, &mstb->ports);
1084
		mutex_unlock(&mstb->mgr->lock);
1085
	}
1086
 
1087
	if (old_ddps != port->ddps) {
1088
		if (port->ddps) {
1089
			drm_dp_check_port_guid(mstb, port);
1090
			if (!port->input)
1091
				drm_dp_send_enum_path_resources(mstb->mgr, mstb, port);
1092
		} else {
1093
			port->guid_valid = false;
1094
			port->available_pbn = 0;
1095
			}
1096
	}
1097
 
1098
	if (old_pdt != port->pdt && !port->input) {
1099
		drm_dp_port_teardown_pdt(port, old_pdt);
1100
 
1101
		ret = drm_dp_port_setup_pdt(port);
1102
		if (ret == true) {
1103
			drm_dp_send_link_address(mstb->mgr, port->mstb);
1104
			port->mstb->link_address_sent = true;
1105
		}
1106
	}
1107
 
1108
	if (created && !port->input) {
1109
		char proppath[255];
5271 serge 1110
		build_mst_prop_path(port, mstb, proppath, sizeof(proppath));
5060 serge 1111
		port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath);
5271 serge 1112
 
1113
		if (port->port_num >= 8) {
1114
			port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
1115
		}
5060 serge 1116
	}
1117
 
1118
	/* put reference to this port */
1119
	drm_dp_put_port(port);
1120
}
1121
 
1122
static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
1123
			       struct drm_dp_connection_status_notify *conn_stat)
1124
{
1125
	struct drm_dp_mst_port *port;
1126
	int old_pdt;
1127
	int old_ddps;
1128
	bool dowork = false;
1129
	port = drm_dp_get_port(mstb, conn_stat->port_number);
1130
	if (!port)
1131
		return;
1132
 
1133
	old_ddps = port->ddps;
1134
	old_pdt = port->pdt;
1135
	port->pdt = conn_stat->peer_device_type;
1136
	port->mcs = conn_stat->message_capability_status;
1137
	port->ldps = conn_stat->legacy_device_plug_status;
1138
	port->ddps = conn_stat->displayport_device_plug_status;
1139
 
1140
	if (old_ddps != port->ddps) {
1141
		if (port->ddps) {
1142
			drm_dp_check_port_guid(mstb, port);
1143
			dowork = true;
1144
		} else {
1145
			port->guid_valid = false;
1146
			port->available_pbn = 0;
1147
		}
1148
	}
1149
	if (old_pdt != port->pdt && !port->input) {
1150
		drm_dp_port_teardown_pdt(port, old_pdt);
1151
 
1152
		if (drm_dp_port_setup_pdt(port))
1153
			dowork = true;
1154
	}
1155
 
1156
	drm_dp_put_port(port);
1157
//   if (dowork)
1158
//       queue_work(system_long_wq, &mstb->mgr->work);
1159
 
1160
}
1161
 
1162
static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr,
1163
							       u8 lct, u8 *rad)
1164
{
1165
	struct drm_dp_mst_branch *mstb;
1166
	struct drm_dp_mst_port *port;
1167
	int i;
1168
	/* find the port by iterating down */
1169
	mstb = mgr->mst_primary;
1170
 
1171
	for (i = 0; i < lct - 1; i++) {
1172
		int shift = (i % 2) ? 0 : 4;
1173
		int port_num = rad[i / 2] >> shift;
1174
 
1175
		list_for_each_entry(port, &mstb->ports, next) {
1176
			if (port->port_num == port_num) {
1177
				if (!port->mstb) {
1178
					DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x\n", lct, rad[0]);
1179
					return NULL;
1180
				}
1181
 
1182
				mstb = port->mstb;
1183
				break;
1184
			}
1185
		}
1186
	}
1187
	kref_get(&mstb->kref);
1188
	return mstb;
1189
}
1190
 
1191
static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
1192
					       struct drm_dp_mst_branch *mstb)
1193
{
1194
	struct drm_dp_mst_port *port;
1195
 
1196
	if (!mstb->link_address_sent) {
1197
		drm_dp_send_link_address(mgr, mstb);
1198
		mstb->link_address_sent = true;
1199
	}
1200
	list_for_each_entry(port, &mstb->ports, next) {
1201
		if (port->input)
1202
			continue;
1203
 
1204
		if (!port->ddps)
1205
			continue;
1206
 
1207
		if (!port->available_pbn)
1208
			drm_dp_send_enum_path_resources(mgr, mstb, port);
1209
 
1210
		if (port->mstb)
1211
			drm_dp_check_and_send_link_address(mgr, port->mstb);
1212
	}
1213
}
1214
 
1215
static void drm_dp_mst_link_probe_work(struct work_struct *work)
1216
{
1217
	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, work);
1218
 
1219
	drm_dp_check_and_send_link_address(mgr, mgr->mst_primary);
1220
 
1221
}
1222
 
1223
static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
1224
				 u8 *guid)
1225
{
1226
	static u8 zero_guid[16];
1227
 
1228
	if (!memcmp(guid, zero_guid, 16)) {
1229
		u64 salt = get_jiffies_64();
1230
		memcpy(&guid[0], &salt, sizeof(u64));
1231
		memcpy(&guid[8], &salt, sizeof(u64));
1232
		return false;
1233
	}
1234
	return true;
1235
}
1236
 
1237
#if 0
1238
static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes)
1239
{
1240
	struct drm_dp_sideband_msg_req_body req;
1241
 
1242
	req.req_type = DP_REMOTE_DPCD_READ;
1243
	req.u.dpcd_read.port_number = port_num;
1244
	req.u.dpcd_read.dpcd_address = offset;
1245
	req.u.dpcd_read.num_bytes = num_bytes;
1246
	drm_dp_encode_sideband_req(&req, msg);
1247
 
1248
	return 0;
1249
}
1250
#endif
1251
 
1252
static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
1253
				    bool up, u8 *msg, int len)
1254
{
1255
	int ret;
1256
	int regbase = up ? DP_SIDEBAND_MSG_UP_REP_BASE : DP_SIDEBAND_MSG_DOWN_REQ_BASE;
1257
	int tosend, total, offset;
1258
	int retries = 0;
1259
 
1260
retry:
1261
	total = len;
1262
	offset = 0;
1263
	do {
1264
		tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total);
1265
 
1266
		ret = drm_dp_dpcd_write(mgr->aux, regbase + offset,
1267
					&msg[offset],
1268
					tosend);
1269
		if (ret != tosend) {
1270
			if (ret == -EIO && retries < 5) {
1271
				retries++;
1272
				goto retry;
1273
			}
1274
			DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret);
1275
			WARN(1, "fail\n");
1276
 
1277
			return -EIO;
1278
		}
1279
		offset += tosend;
1280
		total -= tosend;
1281
	} while (total > 0);
1282
	return 0;
1283
}
1284
 
1285
static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
1286
				  struct drm_dp_sideband_msg_tx *txmsg)
1287
{
1288
	struct drm_dp_mst_branch *mstb = txmsg->dst;
1289
 
1290
	/* both msg slots are full */
1291
	if (txmsg->seqno == -1) {
1292
		if (mstb->tx_slots[0] && mstb->tx_slots[1]) {
1293
			DRM_DEBUG_KMS("%s: failed to find slot\n", __func__);
1294
			return -EAGAIN;
1295
		}
1296
		if (mstb->tx_slots[0] == NULL && mstb->tx_slots[1] == NULL) {
1297
			txmsg->seqno = mstb->last_seqno;
1298
			mstb->last_seqno ^= 1;
1299
		} else if (mstb->tx_slots[0] == NULL)
1300
			txmsg->seqno = 0;
1301
		else
1302
			txmsg->seqno = 1;
1303
		mstb->tx_slots[txmsg->seqno] = txmsg;
1304
	}
1305
	hdr->broadcast = 0;
1306
	hdr->path_msg = txmsg->path_msg;
1307
	hdr->lct = mstb->lct;
1308
	hdr->lcr = mstb->lct - 1;
1309
	if (mstb->lct > 1)
1310
		memcpy(hdr->rad, mstb->rad, mstb->lct / 2);
1311
	hdr->seqno = txmsg->seqno;
1312
	return 0;
1313
}
1314
/*
1315
 * process a single block of the next message in the sideband queue
1316
 */
1317
static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
1318
				   struct drm_dp_sideband_msg_tx *txmsg,
1319
				   bool up)
1320
{
1321
	u8 chunk[48];
1322
	struct drm_dp_sideband_msg_hdr hdr;
1323
	int len, space, idx, tosend;
1324
	int ret;
1325
 
1326
	memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr));
1327
 
1328
	if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED) {
1329
		txmsg->seqno = -1;
1330
		txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND;
1331
	}
1332
 
1333
	/* make hdr from dst mst - for replies use seqno
1334
	   otherwise assign one */
1335
	ret = set_hdr_from_dst_qlock(&hdr, txmsg);
1336
	if (ret < 0)
1337
		return ret;
1338
 
1339
	/* amount left to send in this message */
1340
	len = txmsg->cur_len - txmsg->cur_offset;
1341
 
1342
	/* 48 - sideband msg size - 1 byte for data CRC, x header bytes */
1343
	space = 48 - 1 - drm_dp_calc_sb_hdr_size(&hdr);
1344
 
1345
	tosend = min(len, space);
1346
	if (len == txmsg->cur_len)
1347
		hdr.somt = 1;
1348
	if (space >= len)
1349
		hdr.eomt = 1;
1350
 
1351
 
1352
	hdr.msg_len = tosend + 1;
1353
	drm_dp_encode_sideband_msg_hdr(&hdr, chunk, &idx);
1354
	memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend);
1355
	/* add crc at end */
1356
	drm_dp_crc_sideband_chunk_req(&chunk[idx], tosend);
1357
	idx += tosend + 1;
1358
 
1359
	ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
1360
	if (ret) {
1361
		DRM_DEBUG_KMS("sideband msg failed to send\n");
1362
		return ret;
1363
	}
1364
 
1365
	txmsg->cur_offset += tosend;
1366
	if (txmsg->cur_offset == txmsg->cur_len) {
1367
		txmsg->state = DRM_DP_SIDEBAND_TX_SENT;
1368
		return 1;
1369
	}
1370
	return 0;
1371
}
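/*
 * Worked example of the chunking above (hypothetical message size): a
 * sideband transfer unit is 48 bytes.  For a directly attached branch
 * (LCT 1) the header is 3 bytes, so space = 48 - 1 - 3 = 44 body bytes per
 * chunk.  A 60-byte request therefore goes out as one 48-byte chunk (SOMT
 * set, 44 body bytes + CRC) followed by one 20-byte chunk (EOMT set, the
 * remaining 16 body bytes + CRC).
 */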
1372
 
1373
/* must be called holding qlock */
1374
static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
1375
{
1376
	struct drm_dp_sideband_msg_tx *txmsg;
1377
	int ret;
1378
 
1379
	/* construct a chunk from the first msg in the tx_msg queue */
1380
	if (list_empty(&mgr->tx_msg_downq)) {
1381
		mgr->tx_down_in_progress = false;
1382
		return;
1383
	}
1384
	mgr->tx_down_in_progress = true;
1385
 
1386
	txmsg = list_first_entry(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next);
1387
	ret = process_single_tx_qlock(mgr, txmsg, false);
1388
	if (ret == 1) {
1389
		/* txmsg is sent it should be in the slots now */
1390
		list_del(&txmsg->next);
1391
	} else if (ret) {
1392
		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
1393
		list_del(&txmsg->next);
1394
		if (txmsg->seqno != -1)
1395
			txmsg->dst->tx_slots[txmsg->seqno] = NULL;
1396
		txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
1397
//       wake_up(&mgr->tx_waitq);
1398
	}
1399
	if (list_empty(&mgr->tx_msg_downq)) {
1400
		mgr->tx_down_in_progress = false;
1401
		return;
1402
	}
1403
}
1404
 
1405
/* called holding qlock */
1406
static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
1407
{
1408
	struct drm_dp_sideband_msg_tx *txmsg;
1409
	int ret;
1410
 
1411
	/* construct a chunk from the first msg in the tx_msg queue */
1412
	if (list_empty(&mgr->tx_msg_upq)) {
1413
		mgr->tx_up_in_progress = false;
1414
		return;
1415
	}
1416
 
1417
	txmsg = list_first_entry(&mgr->tx_msg_upq, struct drm_dp_sideband_msg_tx, next);
1418
	ret = process_single_tx_qlock(mgr, txmsg, true);
1419
	if (ret == 1) {
1420
		/* up txmsgs aren't put in slots - so free after we send it */
1421
		list_del(&txmsg->next);
1422
		kfree(txmsg);
1423
	} else if (ret)
1424
		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
1425
	mgr->tx_up_in_progress = true;
1426
}
1427
 
1428
static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
1429
				 struct drm_dp_sideband_msg_tx *txmsg)
1430
{
1431
	mutex_lock(&mgr->qlock);
1432
	list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
1433
	if (!mgr->tx_down_in_progress)
1434
		process_single_down_tx_qlock(mgr);
1435
	mutex_unlock(&mgr->qlock);
1436
}
1437
 
1438
static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
1439
				    struct drm_dp_mst_branch *mstb)
1440
{
1441
	int len;
1442
	struct drm_dp_sideband_msg_tx *txmsg;
1443
	int ret;
1444
 
1445
	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
1446
	if (!txmsg)
1447
		return -ENOMEM;
1448
 
1449
	txmsg->dst = mstb;
1450
	len = build_link_address(txmsg);
1451
 
1452
	drm_dp_queue_down_tx(mgr, txmsg);
1453
 
1454
	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
1455
	if (ret > 0) {
1456
		int i;
1457
 
1458
		if (txmsg->reply.reply_type == 1)
1459
			DRM_DEBUG_KMS("link address nak received\n");
1460
		else {
1461
			DRM_DEBUG_KMS("link address reply: %d\n", txmsg->reply.u.link_addr.nports);
1462
			for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
1463
				DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n", i,
1464
				       txmsg->reply.u.link_addr.ports[i].input_port,
1465
				       txmsg->reply.u.link_addr.ports[i].peer_device_type,
1466
				       txmsg->reply.u.link_addr.ports[i].port_number,
1467
				       txmsg->reply.u.link_addr.ports[i].dpcd_revision,
1468
				       txmsg->reply.u.link_addr.ports[i].mcs,
1469
				       txmsg->reply.u.link_addr.ports[i].ddps,
1470
				       txmsg->reply.u.link_addr.ports[i].legacy_device_plug_status,
1471
				       txmsg->reply.u.link_addr.ports[i].num_sdp_streams,
1472
				       txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks);
1473
			}
1474
			for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
1475
				drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]);
1476
			}
1477
			(*mgr->cbs->hotplug)(mgr);
1478
		}
1479
	} else
1480
		DRM_DEBUG_KMS("link address failed %d\n", ret);
1481
 
1482
	kfree(txmsg);
1483
	return 0;
1484
}
1485
 
1486
static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
1487
					   struct drm_dp_mst_branch *mstb,
1488
					   struct drm_dp_mst_port *port)
1489
{
1490
	int len;
1491
	struct drm_dp_sideband_msg_tx *txmsg;
1492
	int ret;
1493
 
1494
	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
1495
	if (!txmsg)
1496
		return -ENOMEM;
1497
 
1498
	txmsg->dst = mstb;
1499
	len = build_enum_path_resources(txmsg, port->port_num);
1500
 
1501
	drm_dp_queue_down_tx(mgr, txmsg);
1502
 
1503
	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
1504
	if (ret > 0) {
1505
		if (txmsg->reply.reply_type == 1)
1506
			DRM_DEBUG_KMS("enum path resources nak received\n");
1507
		else {
1508
			if (port->port_num != txmsg->reply.u.path_resources.port_number)
1509
				DRM_ERROR("got incorrect port in response\n");
1510
			DRM_DEBUG_KMS("enum path resources %d: %d %d\n", txmsg->reply.u.path_resources.port_number, txmsg->reply.u.path_resources.full_payload_bw_number,
1511
			       txmsg->reply.u.path_resources.avail_payload_bw_number);
1512
			port->available_pbn = txmsg->reply.u.path_resources.avail_payload_bw_number;
1513
		}
1514
	}
1515
 
1516
	kfree(txmsg);
1517
	return 0;
1518
}
1519
 
1520
static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
1521
				   struct drm_dp_mst_port *port,
1522
				   int id,
1523
				   int pbn)
1524
{
1525
	struct drm_dp_sideband_msg_tx *txmsg;
1526
	struct drm_dp_mst_branch *mstb;
1527
	int len, ret;
1528
 
1529
	mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
1530
	if (!mstb)
1531
		return -EINVAL;
1532
 
1533
	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
1534
	if (!txmsg) {
1535
		ret = -ENOMEM;
1536
		goto fail_put;
1537
	}
1538
 
1539
	txmsg->dst = mstb;
1540
	len = build_allocate_payload(txmsg, port->port_num,
1541
				     id,
1542
				     pbn);
1543
 
1544
	drm_dp_queue_down_tx(mgr, txmsg);
1545
 
1546
	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
1547
	if (ret > 0) {
1548
		if (txmsg->reply.reply_type == 1) {
1549
			ret = -EINVAL;
1550
		} else
1551
			ret = 0;
1552
	}
1553
	kfree(txmsg);
1554
fail_put:
1555
	drm_dp_put_mst_branch_device(mstb);
1556
	return ret;
1557
}
1558
 
1559
static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
1560
				       int id,
1561
				       struct drm_dp_payload *payload)
1562
{
1563
	int ret;
1564
 
1565
	ret = drm_dp_dpcd_write_payload(mgr, id, payload);
1566
	if (ret < 0) {
1567
		payload->payload_state = 0;
1568
		return ret;
1569
	}
1570
	payload->payload_state = DP_PAYLOAD_LOCAL;
1571
	return 0;
1572
}
1573
 
1574
static int drm_dp_create_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
1575
				       struct drm_dp_mst_port *port,
1576
				       int id,
1577
				       struct drm_dp_payload *payload)
1578
{
1579
	int ret;
1580
	ret = drm_dp_payload_send_msg(mgr, port, id, port->vcpi.pbn);
1581
	if (ret < 0)
1582
		return ret;
1583
	payload->payload_state = DP_PAYLOAD_REMOTE;
1584
	return ret;
1585
}
1586
 
1587
static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
1588
					struct drm_dp_mst_port *port,
1589
					int id,
1590
					struct drm_dp_payload *payload)
1591
{
1592
	DRM_DEBUG_KMS("\n");
1593
	/* it's okay for these to fail */
1594
	if (port) {
1595
		drm_dp_payload_send_msg(mgr, port, id, 0);
1596
	}
1597
 
1598
	drm_dp_dpcd_write_payload(mgr, id, payload);
5271 serge 1599
	payload->payload_state = DP_PAYLOAD_DELETE_LOCAL;
5060 serge 1600
	return 0;
1601
}
1602
 
1603
static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
1604
					int id,
1605
					struct drm_dp_payload *payload)
1606
{
1607
	payload->payload_state = 0;
1608
	return 0;
1609
}
1610
 
1611
/**
1612
 * drm_dp_update_payload_part1() - Execute payload update part 1
1613
 * @mgr: manager to use.
1614
 *
1615
 * This iterates over all proposed virtual channels, and tries to
1616
 * allocate space in the link for them. For 0->slots transitions,
1617
 * this step just writes the VCPI to the MST device. For slots->0
1618
 * transitions, this writes the updated VCPIs and removes the
1619
 * remote VC payloads.
1620
 *
1621
 * after calling this the driver should generate ACT and payload
1622
 * packets.
1623
 */
1624
int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
1625
{
5271 serge 1626
	int i, j;
5060 serge 1627
	int cur_slots = 1;
1628
	struct drm_dp_payload req_payload;
1629
	struct drm_dp_mst_port *port;
1630
 
1631
	mutex_lock(&mgr->payload_lock);
1632
	for (i = 0; i < mgr->max_payloads; i++) {
1633
		/* solve the current payloads - compare to the hw ones
1634
		   - update the hw view */
1635
		req_payload.start_slot = cur_slots;
1636
		if (mgr->proposed_vcpis[i]) {
1637
			port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
1638
			req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots;
1639
		} else {
1640
			port = NULL;
1641
			req_payload.num_slots = 0;
1642
		}
5271 serge 1643
 
1644
		if (mgr->payloads[i].start_slot != req_payload.start_slot) {
1645
			mgr->payloads[i].start_slot = req_payload.start_slot;
1646
		}
5060 serge 1647
		/* work out what is required to happen with this payload */
5271 serge 1648
		if (mgr->payloads[i].num_slots != req_payload.num_slots) {
5060 serge 1649
 
1650
			/* need to push an update for this payload */
1651
			if (req_payload.num_slots) {
5271 serge 1652
				drm_dp_create_payload_step1(mgr, mgr->proposed_vcpis[i]->vcpi, &req_payload);
5060 serge 1653
				mgr->payloads[i].num_slots = req_payload.num_slots;
1654
			} else if (mgr->payloads[i].num_slots) {
1655
				mgr->payloads[i].num_slots = 0;
5271 serge 1656
				drm_dp_destroy_payload_step1(mgr, port, port->vcpi.vcpi, &mgr->payloads[i]);
5060 serge 1657
				req_payload.payload_state = mgr->payloads[i].payload_state;
5271 serge 1658
				mgr->payloads[i].start_slot = 0;
1659
			}
5060 serge 1660
			mgr->payloads[i].payload_state = req_payload.payload_state;
1661
		}
1662
		cur_slots += req_payload.num_slots;
1663
	}
5271 serge 1664
 
1665
	for (i = 0; i < mgr->max_payloads; i++) {
1666
		if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
1667
			DRM_DEBUG_KMS("removing payload %d\n", i);
1668
			for (j = i; j < mgr->max_payloads - 1; j++) {
1669
				memcpy(&mgr->payloads[j], &mgr->payloads[j + 1], sizeof(struct drm_dp_payload));
1670
				mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1];
1671
				if (mgr->proposed_vcpis[j] && mgr->proposed_vcpis[j]->num_slots) {
1672
					set_bit(j + 1, &mgr->payload_mask);
1673
				} else {
1674
					clear_bit(j + 1, &mgr->payload_mask);
1675
				}
1676
			}
1677
			memset(&mgr->payloads[mgr->max_payloads - 1], 0, sizeof(struct drm_dp_payload));
1678
			mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL;
1679
			clear_bit(mgr->max_payloads, &mgr->payload_mask);
1680
 
1681
		}
1682
	}
5060 serge 1683
	mutex_unlock(&mgr->payload_lock);
1684
 
1685
	return 0;
1686
}
1687
EXPORT_SYMBOL(drm_dp_update_payload_part1);
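/*
 * Sketch of the sequence a driver is expected to follow around the two
 * payload-update halves (illustrative only; the ACT handling via
 * drm_dp_check_act_status() is an assumption based on the usual upstream
 * DP MST programming model, not something defined in this function).
 */
#if 0
static int example_commit_mst_payloads(struct drm_dp_mst_topology_mgr *mgr)
{
	int ret;

	/* step 1: write the VCPI/payload table updates to the branch device */
	ret = drm_dp_update_payload_part1(mgr);
	if (ret)
		return ret;

	/* trigger the allocation change (ACT) in the source hardware here,
	 * then wait for the sink side to acknowledge it */
	ret = drm_dp_check_act_status(mgr);
	if (ret)
		return ret;

	/* step 2: send the remote ALLOCATE_PAYLOAD messages */
	return drm_dp_update_payload_part2(mgr);
}
#endif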
1688
 
1689
/**
1690
 * drm_dp_update_payload_part2() - Execute payload update part 2
1691
 * @mgr: manager to use.
1692
 *
1693
 * This iterates over all proposed virtual channels, and tries to
1694
 * allocate space in the link for them. For 0->slots transitions,
1695
 * this step writes the remote VC payload commands. For slots->0
1696
 * this just resets some internal state.
1697
 */
1698
int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr)
1699
{
1700
	struct drm_dp_mst_port *port;
1701
	int i;
1702
	int ret = 0;
1703
	mutex_lock(&mgr->payload_lock);
1704
	for (i = 0; i < mgr->max_payloads; i++) {
1705
 
1706
		if (!mgr->proposed_vcpis[i])
1707
			continue;
1708
 
1709
		port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
1710
 
1711
		DRM_DEBUG_KMS("payload %d %d\n", i, mgr->payloads[i].payload_state);
1712
		if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) {
5271 serge 1713
			ret = drm_dp_create_payload_step2(mgr, port, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
5060 serge 1714
		} else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
5271 serge 1715
			ret = drm_dp_destroy_payload_step2(mgr, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
5060 serge 1716
		}
1717
		if (ret) {
1718
			mutex_unlock(&mgr->payload_lock);
1719
			return ret;
1720
		}
1721
	}
1722
	mutex_unlock(&mgr->payload_lock);
1723
	return 0;
1724
}
1725
EXPORT_SYMBOL(drm_dp_update_payload_part2);
1726
 
1727
#if 0 /* unused as of yet */
1728
static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
1729
				 struct drm_dp_mst_port *port,
1730
				 int offset, int size)
1731
{
1732
	int len;
1733
	struct drm_dp_sideband_msg_tx *txmsg;
1734
 
1735
	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
1736
	if (!txmsg)
1737
		return -ENOMEM;
1738
 
1739
	len = build_dpcd_read(txmsg, port->port_num, 0, 8);
1740
	txmsg->dst = port->parent;
1741
 
1742
	drm_dp_queue_down_tx(mgr, txmsg);
1743
 
1744
	return 0;
1745
}
1746
#endif
1747
 
1748
static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
1749
				  struct drm_dp_mst_port *port,
1750
				  int offset, int size, u8 *bytes)
1751
{
1752
	int len;
1753
	int ret;
1754
	struct drm_dp_sideband_msg_tx *txmsg;
1755
	struct drm_dp_mst_branch *mstb;
1756
 
1757
	mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
1758
	if (!mstb)
1759
		return -EINVAL;
1760
 
1761
	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
1762
	if (!txmsg) {
1763
		ret = -ENOMEM;
1764
		goto fail_put;
1765
	}
1766
 
1767
	len = build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
1768
	txmsg->dst = mstb;
1769
 
1770
	drm_dp_queue_down_tx(mgr, txmsg);
1771
 
1772
	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
1773
	if (ret > 0) {
1774
		if (txmsg->reply.reply_type == 1) {
1775
			ret = -EINVAL;
1776
		} else
1777
			ret = 0;
1778
	}
1779
	kfree(txmsg);
1780
fail_put:
1781
	drm_dp_put_mst_branch_device(mstb);
1782
	return ret;
1783
}
1784
 
1785
static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type)
1786
{
1787
	struct drm_dp_sideband_msg_reply_body reply;
1788
 
1789
	reply.reply_type = 1;
1790
	reply.req_type = req_type;
1791
	drm_dp_encode_sideband_reply(&reply, msg);
1792
	return 0;
1793
}
1794
 
1795
static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
1796
				    struct drm_dp_mst_branch *mstb,
1797
				    int req_type, int seqno, bool broadcast)
1798
{
1799
	struct drm_dp_sideband_msg_tx *txmsg;
1800
 
1801
	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
1802
	if (!txmsg)
1803
		return -ENOMEM;
1804
 
1805
	txmsg->dst = mstb;
1806
	txmsg->seqno = seqno;
1807
	drm_dp_encode_up_ack_reply(txmsg, req_type);
1808
 
1809
	mutex_lock(&mgr->qlock);
1810
	list_add_tail(&txmsg->next, &mgr->tx_msg_upq);
1811
	if (!mgr->tx_up_in_progress) {
1812
		process_single_up_tx_qlock(mgr);
1813
	}
1814
	mutex_unlock(&mgr->qlock);
1815
	return 0;
1816
}
1817
 
5271 serge 1818
static bool drm_dp_get_vc_payload_bw(int dp_link_bw,
1819
				     int dp_link_count,
1820
				     int *out)
5060 serge 1821
{
1822
	switch (dp_link_bw) {
5271 serge 1823
	default:
1824
		DRM_DEBUG_KMS("invalid link bandwidth in DPCD: %x (link count: %d)\n",
1825
			      dp_link_bw, dp_link_count);
1826
		return false;
1827
 
5060 serge 1828
	case DP_LINK_BW_1_62:
5271 serge 1829
		*out = 3 * dp_link_count;
1830
		break;
5060 serge 1831
	case DP_LINK_BW_2_7:
5271 serge 1832
		*out = 5 * dp_link_count;
1833
		break;
5060 serge 1834
	case DP_LINK_BW_5_4:
5271 serge 1835
		*out = 10 * dp_link_count;
1836
		break;
5060 serge 1837
	}
5271 serge 1838
	return true;
5060 serge 1839
}
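
/*
 * Example (illustrative): a 2.7 Gbps link with 4 lanes gives 5 * 4 = 20
 * PBN per timeslot, and an HBR2 (5.4 Gbps) link with 4 lanes gives
 * 10 * 4 = 40.  The result is stored in mgr->pbn_div below and used to
 * convert a stream's PBN into MTP timeslots.
 */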
1840
 
1841
/**
1842
 * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager
1843
 * @mgr: manager to set state for
1844
 * @mst_state: true to enable MST on this connector - false to disable.
1845
 *
1846
 * This is called by the driver when it detects an MST capable device plugged
1847
 * into a DP MST capable port, or when a DP MST capable device is unplugged.
1848
 */
1849
int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
1850
{
1851
	int ret = 0;
1852
	struct drm_dp_mst_branch *mstb = NULL;
1853
 
1854
	mutex_lock(&mgr->lock);
1855
	if (mst_state == mgr->mst_state)
1856
		goto out_unlock;
1857
 
1858
	mgr->mst_state = mst_state;
1859
	/* set the device into MST mode */
1860
	if (mst_state) {
1861
		WARN_ON(mgr->mst_primary);
1862
 
1863
		/* get dpcd info */
1864
		ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
1865
		if (ret != DP_RECEIVER_CAP_SIZE) {
1866
			DRM_DEBUG_KMS("failed to read DPCD\n");
1867
			goto out_unlock;
1868
		}
1869
 
5271 serge 1870
		if (!drm_dp_get_vc_payload_bw(mgr->dpcd[1],
1871
					      mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK,
1872
					      &mgr->pbn_div)) {
1873
			ret = -EINVAL;
1874
			goto out_unlock;
1875
		}
1876
 
5060 serge 1877
		mgr->total_pbn = 2560;
1878
		mgr->total_slots = DIV_ROUND_UP(mgr->total_pbn, mgr->pbn_div);
1879
		mgr->avail_slots = mgr->total_slots;
1880
 
1881
		/* add initial branch device at LCT 1 */
1882
		mstb = drm_dp_add_mst_branch_device(1, NULL);
1883
		if (mstb == NULL) {
1884
			ret = -ENOMEM;
1885
			goto out_unlock;
1886
		}
1887
		mstb->mgr = mgr;
1888
 
1889
		/* give this the main reference */
1890
		mgr->mst_primary = mstb;
1891
		kref_get(&mgr->mst_primary->kref);
1892
 
1893
		{
1894
			struct drm_dp_payload reset_pay;
1895
			reset_pay.start_slot = 0;
1896
			reset_pay.num_slots = 0x3f;
1897
			drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
1898
		}
1899
 
1900
		ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
1901
					 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
1902
		if (ret < 0) {
1903
			goto out_unlock;
1904
		}
1905
 
1906
 
1907
		/* sort out guid */
1908
		ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, mgr->guid, 16);
1909
		if (ret != 16) {
1910
			DRM_DEBUG_KMS("failed to read DP GUID %d\n", ret);
1911
			goto out_unlock;
1912
		}
1913
 
1914
		mgr->guid_valid = drm_dp_validate_guid(mgr, mgr->guid);
1915
		if (!mgr->guid_valid) {
1916
			ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, mgr->guid, 16);
1917
			mgr->guid_valid = true;
1918
		}
1919
 
1920
//       queue_work(system_long_wq, &mgr->work);
1921
 
1922
		ret = 0;
1923
	} else {
1924
		/* disable MST on the device */
1925
		mstb = mgr->mst_primary;
1926
		mgr->mst_primary = NULL;
1927
		/* this can fail if the device is gone */
1928
		drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
1929
		ret = 0;
1930
		memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload));
1931
		mgr->payload_mask = 0;
1932
		set_bit(0, &mgr->payload_mask);
5271 serge 1933
		mgr->vcpi_mask = 0;
5060 serge 1934
	}
1935
 
1936
out_unlock:
1937
	mutex_unlock(&mgr->lock);
1938
	if (mstb)
1939
		drm_dp_put_mst_branch_device(mstb);
1940
	return ret;
1941
 
1942
}
1943
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst);
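
/*
 * Illustrative driver-side sketch: after a long HPD pulse, probe the
 * sink's DP_MSTM_CAP register and enable or disable the topology manager
 * accordingly.  example_detect_mst() is hypothetical, not part of this
 * helper library.
 */
#if 0
static int example_detect_mst(struct drm_dp_aux *aux,
			      struct drm_dp_mst_topology_mgr *mgr)
{
	u8 mstm_cap;

	if (drm_dp_dpcd_readb(aux, DP_MSTM_CAP, &mstm_cap) != 1)
		return drm_dp_mst_topology_mgr_set_mst(mgr, false);

	return drm_dp_mst_topology_mgr_set_mst(mgr,
					       (mstm_cap & DP_MST_CAP) != 0);
}
#endif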
1944
 
1945
/**
1946
 * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
1947
 * @mgr: manager to suspend
1948
 *
1949
 * This function tells the MST device that we can't handle UP messages
1950
 * anymore. This should stop it from sending any since we are suspended.
1951
 */
1952
void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
1953
{
1954
	mutex_lock(&mgr->lock);
1955
	drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
1956
			   DP_MST_EN | DP_UPSTREAM_IS_SRC);
1957
	mutex_unlock(&mgr->lock);
1958
}
1959
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
1960
 
1961
/**
1962
 * drm_dp_mst_topology_mgr_resume() - resume the MST manager
1963
 * @mgr: manager to resume
1964
 *
1965
 * This will fetch the DPCD and check whether the device is still there;
1966
 * if it is, it rewrites the MSTM control bits and returns.
1967
 *
1968
 * If the device fails, this returns -1 and the driver should do
1969
 * a full MST reprobe, in case we were undocked.
1970
 */
1971
int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
1972
{
1973
	int ret = 0;
1974
 
1975
	mutex_lock(&mgr->lock);
1976
 
1977
	if (mgr->mst_primary) {
1978
		int sret;
1979
		sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
1980
		if (sret != DP_RECEIVER_CAP_SIZE) {
1981
			DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
1982
			ret = -1;
1983
			goto out_unlock;
1984
		}
1985
 
1986
		ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
1987
					 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
1988
		if (ret < 0) {
1989
			DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n");
1990
			ret = -1;
1991
			goto out_unlock;
1992
		}
1993
		ret = 0;
1994
	} else
1995
		ret = -1;
1996
 
1997
out_unlock:
1998
	mutex_unlock(&mgr->lock);
1999
	return ret;
2000
}
2001
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
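
/*
 * Illustrative suspend/resume usage (hypothetical driver code): suspend
 * the manager before the system sleeps, resume it afterwards, and fall
 * back to a full MST teardown and reprobe if the resume helper reports
 * that the device changed while we were asleep.
 */
#if 0
static void example_mst_pm_resume(struct drm_dp_mst_topology_mgr *mgr)
{
	if (drm_dp_mst_topology_mgr_resume(mgr) < 0) {
		/* undocked or replugged: tear down and redetect the sink */
		drm_dp_mst_topology_mgr_set_mst(mgr, false);
	}
}
#endif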
2002
 
2003
static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
2004
{
2005
	int len;
2006
	u8 replyblock[32];
2007
	int replylen, origlen, curreply;
2008
	int ret;
2009
	struct drm_dp_sideband_msg_rx *msg;
2010
	int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE : DP_SIDEBAND_MSG_DOWN_REP_BASE;
2011
	msg = up ? &mgr->up_req_recv : &mgr->down_rep_recv;
2012
 
2013
	len = min(mgr->max_dpcd_transaction_bytes, 16);
2014
	ret = drm_dp_dpcd_read(mgr->aux, basereg,
2015
			       replyblock, len);
2016
	if (ret != len) {
2017
		DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret);
2018
		return;
2019
	}
2020
	ret = drm_dp_sideband_msg_build(msg, replyblock, len, true);
2021
	if (!ret) {
2022
		DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
2023
		return;
2024
	}
2025
	replylen = msg->curchunk_len + msg->curchunk_hdrlen;
2026
 
2027
	origlen = replylen;
2028
	replylen -= len;
2029
	curreply = len;
2030
	while (replylen > 0) {
2031
		len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
2032
		ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
2033
				    replyblock, len);
2034
		if (ret != len) {
2035
			DRM_DEBUG_KMS("failed to read a chunk\n");
2036
		}
2037
		ret = drm_dp_sideband_msg_build(msg, replyblock, len, false);
2038
		if (ret == false)
2039
			DRM_DEBUG_KMS("failed to build sideband msg\n");
2040
		curreply += len;
2041
		replylen -= len;
2042
	}
2043
}
2044
 
2045
static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
2046
{
2047
	int ret = 0;
2048
 
2049
	drm_dp_get_one_sb_msg(mgr, false);
2050
 
2051
	if (mgr->down_rep_recv.have_eomt) {
2052
		struct drm_dp_sideband_msg_tx *txmsg;
2053
		struct drm_dp_mst_branch *mstb;
2054
		int slot = -1;
2055
		mstb = drm_dp_get_mst_branch_device(mgr,
2056
						    mgr->down_rep_recv.initial_hdr.lct,
2057
						    mgr->down_rep_recv.initial_hdr.rad);
2058
 
2059
		if (!mstb) {
2060
			DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->down_rep_recv.initial_hdr.lct);
2061
			memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2062
			return 0;
2063
		}
2064
 
2065
		/* find the message */
2066
		slot = mgr->down_rep_recv.initial_hdr.seqno;
2067
		mutex_lock(&mgr->qlock);
2068
		txmsg = mstb->tx_slots[slot];
2069
		/* remove from slots */
2070
		mutex_unlock(&mgr->qlock);
2071
 
2072
		if (!txmsg) {
2073
			DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n",
2074
			       mstb,
2075
			       mgr->down_rep_recv.initial_hdr.seqno,
2076
			       mgr->down_rep_recv.initial_hdr.lct,
2077
				      mgr->down_rep_recv.initial_hdr.rad[0],
2078
				      mgr->down_rep_recv.msg[0]);
2079
			drm_dp_put_mst_branch_device(mstb);
2080
			memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2081
			return 0;
2082
		}
2083
 
2084
		drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply);
2085
		if (txmsg->reply.reply_type == 1) {
2086
			DRM_DEBUG_KMS("Got NAK reply: req 0x%02x, reason 0x%02x, nak data 0x%02x\n", txmsg->reply.req_type, txmsg->reply.u.nak.reason, txmsg->reply.u.nak.nak_data);
2087
		}
2088
 
2089
		memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2090
		drm_dp_put_mst_branch_device(mstb);
2091
 
2092
		mutex_lock(&mgr->qlock);
2093
		txmsg->state = DRM_DP_SIDEBAND_TX_RX;
2094
		mstb->tx_slots[slot] = NULL;
2095
		mutex_unlock(&mgr->qlock);
2096
 
2097
//       wake_up(&mgr->tx_waitq);
2098
	}
2099
	return ret;
2100
}
2101
 
2102
static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
2103
{
2104
	int ret = 0;
2105
	drm_dp_get_one_sb_msg(mgr, true);
2106
 
2107
	if (mgr->up_req_recv.have_eomt) {
2108
		struct drm_dp_sideband_msg_req_body msg;
2109
		struct drm_dp_mst_branch *mstb;
2110
		bool seqno;
2111
		mstb = drm_dp_get_mst_branch_device(mgr,
2112
						    mgr->up_req_recv.initial_hdr.lct,
2113
						    mgr->up_req_recv.initial_hdr.rad);
2114
		if (!mstb) {
2115
			DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
2116
			memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2117
			return 0;
2118
		}
2119
 
2120
		seqno = mgr->up_req_recv.initial_hdr.seqno;
2121
		drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg);
2122
 
2123
		if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
2124
			drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false);
2125
			drm_dp_update_port(mstb, &msg.u.conn_stat);
2126
			DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
2127
			(*mgr->cbs->hotplug)(mgr);
2128
 
2129
		} else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
2130
			drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false);
2131
			DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn);
2132
		}
2133
 
2134
		drm_dp_put_mst_branch_device(mstb);
2135
		memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2136
	}
2137
	return ret;
2138
}
2139
 
2140
/**
2141
 * drm_dp_mst_hpd_irq() - MST hotplug IRQ notify
2142
 * @mgr: manager to notify irq for.
2143
 * @esi: 4 bytes from SINK_COUNT_ESI
5271 serge 2144
 * @handled: whether the hpd interrupt was consumed or not
5060 serge 2145
 *
2146
 * This should be called from the driver when it detects a short IRQ,
2147
 * along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The
2148
 * topology manager will process the sideband messages received as a result
2149
 * of this.
2150
 */
2151
int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled)
2152
{
2153
	int ret = 0;
2154
	int sc;
2155
	*handled = false;
2156
	sc = esi[0] & 0x3f;
2157
 
2158
	if (sc != mgr->sink_count) {
2159
		mgr->sink_count = sc;
2160
		*handled = true;
2161
	}
2162
 
2163
	if (esi[1] & DP_DOWN_REP_MSG_RDY) {
2164
		ret = drm_dp_mst_handle_down_rep(mgr);
2165
		*handled = true;
2166
	}
2167
 
2168
	if (esi[1] & DP_UP_REQ_MSG_RDY) {
2169
		ret |= drm_dp_mst_handle_up_req(mgr);
2170
		*handled = true;
2171
	}
2172
 
2173
	drm_dp_mst_kick_tx(mgr);
2174
	return ret;
2175
}
2176
EXPORT_SYMBOL(drm_dp_mst_hpd_irq);
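
/*
 * Illustrative short-pulse handler (hypothetical driver code): read the
 * ESI bytes, hand them to the topology manager, then ack the serviced
 * IRQ bits so the sink can raise the next event.  Reading 4 bytes and
 * acking 3 mirrors common usage but may differ per driver.
 */
#if 0
static void example_handle_short_hpd(struct drm_dp_aux *aux,
				     struct drm_dp_mst_topology_mgr *mgr)
{
	u8 esi[4];
	bool handled = false;

	if (drm_dp_dpcd_read(aux, DP_SINK_COUNT_ESI, esi, 4) != 4)
		return;

	drm_dp_mst_hpd_irq(mgr, esi, &handled);

	if (handled)
		drm_dp_dpcd_write(aux, DP_SINK_COUNT_ESI + 1, &esi[1], 3);
}
#endif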
2177
 
2178
/**
2179
 * drm_dp_mst_detect_port() - get connection status for an MST port
2180
 * @mgr: manager for this port
2181
 * @port: unverified pointer to a port
2182
 *
2183
 * This returns the current connection state for a port. It validates the
2184
 * port pointer still exists so the caller doesn't require a reference.
2185
 */
5271 serge 2186
enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector,
2187
						 struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
5060 serge 2188
{
2189
	enum drm_connector_status status = connector_status_disconnected;
2190
 
2191
	/* we need to search for the port in the mgr in case it's gone */
2192
	port = drm_dp_get_validated_port_ref(mgr, port);
2193
	if (!port)
2194
		return connector_status_disconnected;
2195
 
2196
	if (!port->ddps)
2197
		goto out;
2198
 
2199
	switch (port->pdt) {
2200
	case DP_PEER_DEVICE_NONE:
2201
	case DP_PEER_DEVICE_MST_BRANCHING:
2202
		break;
2203
 
2204
	case DP_PEER_DEVICE_SST_SINK:
2205
		status = connector_status_connected;
5271 serge 2206
		/* for logical ports - cache the EDID */
2207
		if (port->port_num >= 8 && !port->cached_edid) {
2208
			port->cached_edid = drm_get_edid(connector, &port->aux.ddc);
2209
		}
5060 serge 2210
		break;
2211
	case DP_PEER_DEVICE_DP_LEGACY_CONV:
2212
		if (port->ldps)
2213
			status = connector_status_connected;
2214
		break;
2215
	}
2216
out:
2217
	drm_dp_put_port(port);
2218
	return status;
2219
}
2220
EXPORT_SYMBOL(drm_dp_mst_detect_port);
2221
 
2222
/**
2223
 * drm_dp_mst_get_edid() - get EDID for an MST port
2224
 * @connector: toplevel connector to get EDID for
2225
 * @mgr: manager for this port
2226
 * @port: unverified pointer to a port.
2227
 *
2228
 * This returns an EDID for the port connected to a connector.
2229
 * It validates the pointer still exists so the caller doesn't require a
2230
 * reference.
2231
 */
2232
struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
2233
{
2234
	struct edid *edid = NULL;
2235
 
2236
	/* we need to search for the port in the mgr in case it's gone */
2237
	port = drm_dp_get_validated_port_ref(mgr, port);
2238
	if (!port)
2239
		return NULL;
2240
 
5271 serge 2241
	if (port->cached_edid)
2242
		edid = drm_edid_duplicate(port->cached_edid);
2243
	else
5060 serge 2244
		edid = drm_get_edid(connector, &port->aux.ddc);
5271 serge 2245
 
2246
	drm_mode_connector_set_tile_property(connector);
5060 serge 2247
	drm_dp_put_port(port);
2248
	return edid;
2249
}
2250
EXPORT_SYMBOL(drm_dp_mst_get_edid);
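
/*
 * Illustrative connector callbacks (hypothetical driver code): the
 * example_mst_connector wrapper and to_example_mst_connector() are
 * assumed driver state, not part of this helper library.
 */
#if 0
static enum drm_connector_status
example_mst_detect(struct drm_connector *connector, bool force)
{
	struct example_mst_connector *c = to_example_mst_connector(connector);

	return drm_dp_mst_detect_port(connector, c->mgr, c->port);
}

static int example_mst_get_modes(struct drm_connector *connector)
{
	struct example_mst_connector *c = to_example_mst_connector(connector);
	struct edid *edid;
	int ret = 0;

	edid = drm_dp_mst_get_edid(connector, c->mgr, c->port);
	if (edid) {
		drm_mode_connector_update_edid_property(connector, edid);
		ret = drm_add_edid_modes(connector, edid);
		kfree(edid);
	}
	return ret;
}
#endif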
2251
 
2252
/**
2253
 * drm_dp_find_vcpi_slots() - find slots for this PBN value
2254
 * @mgr: manager to use
2255
 * @pbn: payload bandwidth to convert into slots.
2256
 */
2257
int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
2258
			   int pbn)
2259
{
2260
	int num_slots;
2261
 
2262
	num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
2263
 
2264
	if (num_slots > mgr->avail_slots)
2265
		return -ENOSPC;
2266
	return num_slots;
2267
}
2268
EXPORT_SYMBOL(drm_dp_find_vcpi_slots);
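
/*
 * Example (illustrative): with pbn_div = 40 (HBR2, 4 lanes) a 689 PBN
 * stream needs DIV_ROUND_UP(689, 40) = 18 timeslots; if avail_slots is
 * smaller than that the call fails with -ENOSPC.
 */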
2269
 
2270
static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
2271
			    struct drm_dp_vcpi *vcpi, int pbn)
2272
{
2273
	int num_slots;
2274
	int ret;
2275
 
2276
	num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
2277
 
2278
	if (num_slots > mgr->avail_slots)
2279
		return -ENOSPC;
2280
 
2281
	vcpi->pbn = pbn;
2282
	vcpi->aligned_pbn = num_slots * mgr->pbn_div;
2283
	vcpi->num_slots = num_slots;
2284
 
2285
	ret = drm_dp_mst_assign_payload_id(mgr, vcpi);
2286
	if (ret < 0)
2287
		return ret;
2288
	return 0;
2289
}
2290
 
2291
/**
2292
 * drm_dp_mst_allocate_vcpi() - Allocate a virtual channel
2293
 * @mgr: manager for this port
2294
 * @port: port to allocate a virtual channel for.
2295
 * @pbn: payload bandwidth number to request
2296
 * @slots: returned number of slots for this PBN.
2297
 */
2298
bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, int pbn, int *slots)
2299
{
2300
	int ret;
2301
 
2302
	port = drm_dp_get_validated_port_ref(mgr, port);
2303
	if (!port)
2304
		return false;
2305
 
2306
	if (port->vcpi.vcpi > 0) {
2307
		DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", port->vcpi.vcpi, port->vcpi.pbn, pbn);
2308
		if (pbn == port->vcpi.pbn) {
2309
			*slots = port->vcpi.num_slots;
2310
			return true;
2311
		}
2312
	}
2313
 
2314
	ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn);
2315
	if (ret) {
2316
		DRM_DEBUG_KMS("failed to init vcpi %d %d %d\n", DIV_ROUND_UP(pbn, mgr->pbn_div), mgr->avail_slots, ret);
2317
		goto out;
2318
	}
2319
	DRM_DEBUG_KMS("initing vcpi for %d %d\n", pbn, port->vcpi.num_slots);
2320
	*slots = port->vcpi.num_slots;
2321
 
2322
	drm_dp_put_port(port);
2323
	return true;
2324
out:
2325
	return false;
2326
}
2327
EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi);
2328
 
2329
/**
2330
 * drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI
2331
 * @mgr: manager for this port
2332
 * @port: unverified pointer to a port.
2333
 *
2334
 * This just resets the number of slots for the port's VCPI for later programming.
2335
 */
2336
void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
2337
{
2338
	port = drm_dp_get_validated_port_ref(mgr, port);
2339
	if (!port)
2340
		return;
2341
	port->vcpi.num_slots = 0;
2342
	drm_dp_put_port(port);
2343
}
2344
EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots);
2345
 
2346
/**
2347
 * drm_dp_mst_deallocate_vcpi() - deallocate a VCPI
2348
 * @mgr: manager for this port
2349
 * @port: unverified port to deallocate vcpi for
2350
 */
2351
void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
2352
{
2353
	port = drm_dp_get_validated_port_ref(mgr, port);
2354
	if (!port)
2355
		return;
2356
 
2357
	drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
2358
	port->vcpi.num_slots = 0;
2359
	port->vcpi.pbn = 0;
2360
	port->vcpi.aligned_pbn = 0;
2361
	port->vcpi.vcpi = 0;
2362
	drm_dp_put_port(port);
2363
}
2364
EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi);
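
/*
 * Illustrative disable path (hypothetical driver code): zero the VCPI
 * slot count, run both payload update steps so the empty allocation is
 * propagated, then release the VCPI once the stream is off:
 *
 *	drm_dp_mst_reset_vcpi_slots(mgr, port);
 *	drm_dp_update_payload_part1(mgr);
 *	drm_dp_update_payload_part2(mgr);
 *	drm_dp_mst_deallocate_vcpi(mgr, port);
 */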
2365
 
2366
static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
2367
				     int id, struct drm_dp_payload *payload)
2368
{
2369
	u8 payload_alloc[3], status;
2370
	int ret;
2371
	int retries = 0;
2372
 
2373
	drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
2374
			   DP_PAYLOAD_TABLE_UPDATED);
2375
 
2376
	payload_alloc[0] = id;
2377
	payload_alloc[1] = payload->start_slot;
2378
	payload_alloc[2] = payload->num_slots;
2379
 
2380
	ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
2381
	if (ret != 3) {
2382
		DRM_DEBUG_KMS("failed to write payload allocation %d\n", ret);
2383
		goto fail;
2384
	}
2385
 
2386
retry:
2387
	ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
2388
	if (ret < 0) {
2389
		DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
2390
		goto fail;
2391
	}
2392
 
2393
	if (!(status & DP_PAYLOAD_TABLE_UPDATED)) {
2394
		retries++;
2395
		if (retries < 20) {
2396
			usleep_range(10000, 20000);
2397
			goto retry;
2398
		}
2399
		DRM_DEBUG_KMS("status not set after read payload table status %d\n", status);
2400
		ret = -EINVAL;
2401
		goto fail;
2402
	}
2403
	ret = 0;
2404
fail:
2405
	return ret;
2406
}
2407
 
2408
 
2409
/**
2410
 * drm_dp_check_act_status() - Check ACT handled status.
2411
 * @mgr: manager to use
2412
 *
2413
 * Check the payload status bits in the DPCD for ACT handled completion.
2414
 */
2415
int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
2416
{
2417
	u8 status;
2418
	int ret;
2419
	int count = 0;
2420
 
2421
	do {
2422
		ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
2423
 
2424
		if (ret < 0) {
2425
			DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
2426
			goto fail;
2427
		}
2428
 
2429
		if (status & DP_PAYLOAD_ACT_HANDLED)
2430
			break;
2431
		count++;
2432
		udelay(100);
2433
 
2434
	} while (count < 30);
2435
 
2436
	if (!(status & DP_PAYLOAD_ACT_HANDLED)) {
2437
		DRM_DEBUG_KMS("failed to get ACT bit %d after %d retries\n", status, count);
2438
		ret = -EINVAL;
2439
		goto fail;
2440
	}
2441
	return 0;
2442
fail:
2443
	return ret;
2444
}
2445
EXPORT_SYMBOL(drm_dp_check_act_status);
2446
 
2447
/**
2448
 * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
2449
 * @clock: dot clock for the mode
2450
 * @bpp: bpp for the mode.
2451
 *
2452
 * This uses the formula in the spec to calculate the PBN value for a mode.
2453
 */
2454
int drm_dp_calc_pbn_mode(int clock, int bpp)
2455
{
2456
	fixed20_12 pix_bw;
2457
	fixed20_12 fbpp;
2458
	fixed20_12 result;
2459
	fixed20_12 margin, tmp;
2460
	u32 res;
2461
 
2462
	pix_bw.full = dfixed_const(clock);
2463
	fbpp.full = dfixed_const(bpp);
2464
	tmp.full = dfixed_const(8);
2465
	fbpp.full = dfixed_div(fbpp, tmp);
2466
 
2467
	result.full = dfixed_mul(pix_bw, fbpp);
2468
	margin.full = dfixed_const(54);
2469
	tmp.full = dfixed_const(64);
2470
	margin.full = dfixed_div(margin, tmp);
2471
	result.full = dfixed_div(result, margin);
2472
 
2473
	margin.full = dfixed_const(1006);
2474
	tmp.full = dfixed_const(1000);
2475
	margin.full = dfixed_div(margin, tmp);
2476
	result.full = dfixed_mul(result, margin);
2477
 
2478
	result.full = dfixed_div(result, tmp);
2479
	result.full = dfixed_ceil(result);
2480
	res = dfixed_trunc(result);
2481
	return res;
2482
}
2483
EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
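
/*
 * Worked example (matches test_calc_pbn_mode() below): a 154 MHz mode at
 * 30 bpp needs 154000 * 30 / 8 = 577500 kB/s of stream bandwidth;
 * scaling by 64/54 and by the 1.006 margin and dividing by 1000 gives
 * 688.55, which rounds up to 689 PBN.
 */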
2484
 
2485
static int test_calc_pbn_mode(void)
2486
{
2487
	int ret;
2488
	ret = drm_dp_calc_pbn_mode(154000, 30);
2489
	if (ret != 689)
2490
		return -EINVAL;
2491
	ret = drm_dp_calc_pbn_mode(234000, 30);
2492
	if (ret != 1047)
2493
		return -EINVAL;
2494
	return 0;
2495
}
2496
 
2497
/* we want to kick the TX after we've acked the up/down IRQs. */
2498
static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr)
2499
{
2500
//   queue_work(system_long_wq, &mgr->tx_work);
2501
}
2502
 
2503
static void drm_dp_mst_dump_mstb(struct seq_file *m,
2504
				 struct drm_dp_mst_branch *mstb)
2505
{
2506
	struct drm_dp_mst_port *port;
2507
	int tabs = mstb->lct;
2508
	char prefix[10];
2509
	int i;
2510
 
2511
	for (i = 0; i < tabs; i++)
2512
		prefix[i] = '\t';
2513
	prefix[i] = '\0';
2514
 
2515
//   seq_printf(m, "%smst: %p, %d\n", prefix, mstb, mstb->num_ports);
2516
//   list_for_each_entry(port, &mstb->ports, next) {
2517
//       seq_printf(m, "%sport: %d: ddps: %d ldps: %d, %p, conn: %p\n", prefix, port->port_num, port->ddps, port->ldps, port, port->connector);
2518
//       if (port->mstb)
2519
//           drm_dp_mst_dump_mstb(m, port->mstb);
2520
//   }
2521
}
2522
 
2523
static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
2524
				  char *buf)
2525
{
2526
	int ret;
2527
	int i;
2528
	for (i = 0; i < 4; i++) {
2529
		ret = drm_dp_dpcd_read(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS + (i * 16), &buf[i * 16], 16);
2530
		if (ret != 16)
2531
			break;
2532
	}
2533
	if (i == 4)
2534
		return true;
2535
	return false;
2536
}
2537
 
2538
/**
2539
 * drm_dp_mst_dump_topology(): dump topology to seq file.
2540
 * @m: seq_file to dump output to
2541
 * @mgr: manager to dump current topology for.
2542
 *
2543
 * helper to dump MST topology to a seq file for debugfs.
2544
 */
2545
void drm_dp_mst_dump_topology(struct seq_file *m,
2546
			      struct drm_dp_mst_topology_mgr *mgr)
2547
{
2548
	int i;
2549
	struct drm_dp_mst_port *port;
2550
	mutex_lock(&mgr->lock);
2551
	if (mgr->mst_primary)
2552
		drm_dp_mst_dump_mstb(m, mgr->mst_primary);
2553
 
2554
	/* dump VCPIs */
2555
	mutex_unlock(&mgr->lock);
2556
 
2557
 
2558
 
2559
}
2560
EXPORT_SYMBOL(drm_dp_mst_dump_topology);
2561
 
2562
static void drm_dp_tx_work(struct work_struct *work)
2563
{
2564
	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
2565
 
2566
	mutex_lock(&mgr->qlock);
2567
	if (mgr->tx_down_in_progress)
2568
		process_single_down_tx_qlock(mgr);
2569
	mutex_unlock(&mgr->qlock);
2570
}
2571
 
2572
/**
2573
 * drm_dp_mst_topology_mgr_init - initialise a topology manager
2574
 * @mgr: manager struct to initialise
2575
 * @dev: device providing this structure - for i2c addition.
2576
 * @aux: DP helper aux channel to talk to this device
2577
 * @max_dpcd_transaction_bytes: hw specific DPCD transaction limit
2578
 * @max_payloads: maximum number of payloads this GPU can source
2579
 * @conn_base_id: the connector object ID the MST device is connected to.
2580
 *
2581
 * Return 0 for success, or negative error code on failure
2582
 */
2583
int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
2584
				 struct device *dev, struct drm_dp_aux *aux,
2585
				 int max_dpcd_transaction_bytes,
2586
				 int max_payloads, int conn_base_id)
2587
{
2588
	mutex_init(&mgr->lock);
2589
	mutex_init(&mgr->qlock);
2590
	mutex_init(&mgr->payload_lock);
2591
	INIT_LIST_HEAD(&mgr->tx_msg_upq);
2592
	INIT_LIST_HEAD(&mgr->tx_msg_downq);
2593
	INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
2594
	INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
2595
//   init_waitqueue_head(&mgr->tx_waitq);
2596
	mgr->dev = dev;
2597
	mgr->aux = aux;
2598
	mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
2599
	mgr->max_payloads = max_payloads;
2600
	mgr->conn_base_id = conn_base_id;
2601
	mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL);
2602
	if (!mgr->payloads)
2603
		return -ENOMEM;
2604
	mgr->proposed_vcpis = kcalloc(max_payloads, sizeof(struct drm_dp_vcpi *), GFP_KERNEL);
2605
	if (!mgr->proposed_vcpis)
2606
		return -ENOMEM;
2607
	set_bit(0, &mgr->payload_mask);
2608
	test_calc_pbn_mode();
2609
	return 0;
2610
}
2611
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
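
/*
 * Illustrative init call (hypothetical driver code): the example_dp
 * wrapper and the 16-byte/3-payload limits are made-up values chosen
 * for the example, not requirements of this helper library.
 */
#if 0
static int example_mst_init(struct example_dp *dp, int connector_id)
{
	return drm_dp_mst_topology_mgr_init(&dp->mst_mgr, dp->dev, &dp->aux,
					    16, 3, connector_id);
}
#endif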
2612
 
2613
/**
2614
 * drm_dp_mst_topology_mgr_destroy() - destroy topology manager.
2615
 * @mgr: manager to destroy
2616
 */
2617
void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
2618
{
2619
	mutex_lock(&mgr->payload_lock);
2620
	kfree(mgr->payloads);
2621
	mgr->payloads = NULL;
2622
	kfree(mgr->proposed_vcpis);
2623
	mgr->proposed_vcpis = NULL;
2624
	mutex_unlock(&mgr->payload_lock);
2625
	mgr->dev = NULL;
2626
	mgr->aux = NULL;
2627
}
2628
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);
2629
 
2630
/* I2C device */
2631
static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
2632
			       int num)
2633
{
2634
	struct drm_dp_aux *aux = adapter->algo_data;
2635
	struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port, aux);
2636
	struct drm_dp_mst_branch *mstb;
2637
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
2638
	unsigned int i;
2639
	bool reading = false;
2640
	struct drm_dp_sideband_msg_req_body msg;
2641
	struct drm_dp_sideband_msg_tx *txmsg = NULL;
2642
	int ret;
2643
 
2644
	mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
2645
	if (!mstb)
2646
		return -EREMOTEIO;
2647
 
2648
	/* construct i2c msg */
2649
	/* see if last msg is a read */
2650
	if (msgs[num - 1].flags & I2C_M_RD)
2651
		reading = true;
2652
 
2653
	if (!reading) {
2654
		DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n");
2655
		ret = -EIO;
2656
		goto out;
2657
	}
2658
 
2659
	msg.req_type = DP_REMOTE_I2C_READ;
2660
	msg.u.i2c_read.num_transactions = num - 1;
2661
	msg.u.i2c_read.port_number = port->port_num;
2662
	for (i = 0; i < num - 1; i++) {
2663
		msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr;
2664
		msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len;
2665
		msg.u.i2c_read.transactions[i].bytes = msgs[i].buf;
2666
	}
2667
	msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr;
2668
	msg.u.i2c_read.num_bytes_read = msgs[num - 1].len;
2669
 
2670
	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2671
	if (!txmsg) {
2672
		ret = -ENOMEM;
2673
		goto out;
2674
	}
2675
 
2676
	txmsg->dst = mstb;
2677
	drm_dp_encode_sideband_req(&msg, txmsg);
2678
 
2679
	drm_dp_queue_down_tx(mgr, txmsg);
2680
 
2681
	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
2682
	if (ret > 0) {
2683
 
2684
		if (txmsg->reply.reply_type == 1) { /* got a NAK back */
2685
			ret = -EREMOTEIO;
2686
			goto out;
2687
		}
2688
		if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) {
2689
			ret = -EIO;
2690
			goto out;
2691
		}
2692
		memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes, msgs[num - 1].len);
2693
		ret = num;
2694
	}
2695
out:
2696
	kfree(txmsg);
2697
	drm_dp_put_mst_branch_device(mstb);
2698
	return ret;
2699
}
2700
 
2701
static u32 drm_dp_mst_i2c_functionality(struct i2c_adapter *adapter)
2702
{
2703
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
2704
	       I2C_FUNC_SMBUS_READ_BLOCK_DATA |
2705
	       I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
2706
	       I2C_FUNC_10BIT_ADDR;
2707
}
2708
 
2709
static const struct i2c_algorithm drm_dp_mst_i2c_algo = {
2710
	.functionality = drm_dp_mst_i2c_functionality,
2711
	.master_xfer = drm_dp_mst_i2c_xfer,
2712
};
2713
 
2714
/**
2715
 * drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX
2716
 * @aux: DisplayPort AUX channel
2717
 *
2718
 * Returns 0 on success or a negative error code on failure.
2719
 */
2720
static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux)
2721
{
2722
	aux->ddc.algo = &drm_dp_mst_i2c_algo;
2723
	aux->ddc.algo_data = aux;
2724
	aux->ddc.retries = 3;
2725
 
2726
	aux->ddc.class = I2C_CLASS_DDC;
2727
	aux->ddc.owner = THIS_MODULE;
2728
	aux->ddc.dev.parent = aux->dev;
2729
 
2730
	return i2c_add_adapter(&aux->ddc);
2731
}
2732
 
2733
/**
2734
 * drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter
2735
 * @aux: DisplayPort AUX channel
2736
 */
2737
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux)
2738
{
2739
	i2c_del_adapter(&aux->ddc);
2740
}