/*
 * Copyright © 2014 Red Hat
 *
 * Permission to use, copy, modify, distribute, and sell this software and its
 * documentation for any purpose is hereby granted without fee, provided that
 * the above copyright notice appear in all copies and that both that copyright
 * notice and this permission notice appear in supporting documentation, and
 * that the name of the copyright holders not be used in advertising or
 * publicity pertaining to distribution of the software without specific,
 * written prior permission.  The copyright holders make no representations
 * about the suitability of this software for any purpose.  It is provided "as
 * is" without express or implied warranty.
 *
 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
 * OF THIS SOFTWARE.
 */

/* The include targets below were lost in the HTML export of this page
 * (angle-bracketed names were eaten as tags); this is the standard header
 * set of the upstream Linux drm_dp_mst_topology.c, assumed for this port. */
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/i2c.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drmP.h>

#include <drm/drm_fixed.h>

u64 get_jiffies_64(void)
{
    return jiffies;
}
/**
 * DOC: dp mst helper
 *
 * These functions contain parts of the DisplayPort 1.2a MultiStream Transport
 * protocol. The helpers contain a topology manager and bandwidth manager.
 * The helpers encapsulate the sending and receiving of sideband msgs.
 */
static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
				  char *buf);
static int test_calc_pbn_mode(void);

static void drm_dp_put_port(struct drm_dp_mst_port *port);

static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
				     int id,
				     struct drm_dp_payload *payload);

static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
				  struct drm_dp_mst_port *port,
				  int offset, int size, u8 *bytes);

static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
				     struct drm_dp_mst_branch *mstb);
static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
					   struct drm_dp_mst_branch *mstb,
					   struct drm_dp_mst_port *port);
static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
				 u8 *guid);

static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux);
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux);
static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);
/* sideband msg handling */
static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
{
	u8 bitmask = 0x80;
	u8 bitshift = 7;
	u8 array_index = 0;
	int number_of_bits = num_nibbles * 4;
	u8 remainder = 0;

	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		remainder |= (data[array_index] & bitmask) >> bitshift;
		bitmask >>= 1;
		bitshift--;
		if (bitmask == 0) {
			bitmask = 0x80;
			bitshift = 7;
			array_index++;
		}
		if ((remainder & 0x10) == 0x10)
			remainder ^= 0x13;
	}

	number_of_bits = 4;
	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		if ((remainder & 0x10) != 0)
			remainder ^= 0x13;
	}

	return remainder;
}

static u8 drm_dp_msg_data_crc4(const uint8_t *data, u8 number_of_bytes)
{
	u8 bitmask = 0x80;
	u8 bitshift = 7;
	u8 array_index = 0;
	int number_of_bits = number_of_bytes * 8;
	u16 remainder = 0;

	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		remainder |= (data[array_index] & bitmask) >> bitshift;
		bitmask >>= 1;
		bitshift--;
		if (bitmask == 0) {
			bitmask = 0x80;
			bitshift = 7;
			array_index++;
		}
		if ((remainder & 0x100) == 0x100)
			remainder ^= 0xd5;
	}

	number_of_bits = 8;
	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		if ((remainder & 0x100) != 0)
			remainder ^= 0xd5;
	}

	return remainder & 0xff;
}
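
/*
 * Both helpers above are straight bit-serial CRC implementations from the
 * DP 1.2 sideband spec: the header CRC reduces by 0x13 (the 4-bit
 * polynomial x^4 + x + 1) and the message-body CRC by 0xd5, its 8-bit
 * counterpart. The trailing loop in each shifts in the zero padding that
 * flushes the final remainder out of the register.
 */
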
static inline u8 drm_dp_calc_sb_hdr_size(struct drm_dp_sideband_msg_hdr *hdr)
{
	u8 size = 3;
	size += (hdr->lct / 2);
	return size;
}

static void drm_dp_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
					   u8 *buf, int *len)
{
	int idx = 0;
	int i;
	u8 crc4;
	buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf);
	for (i = 0; i < (hdr->lct / 2); i++)
		buf[idx++] = hdr->rad[i];
	buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) |
		(hdr->msg_len & 0x3f);
	buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4);

	crc4 = drm_dp_msg_header_crc4(buf, (idx * 2) - 1);
	buf[idx - 1] |= (crc4 & 0xf);

	*len = idx;
}
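
/*
 * The encoded header is thus 3 + lct/2 bytes on the wire:
 *   byte 0         : LCT[7:4] | LCR[3:0]
 *   bytes 1..lct/2 : RAD, one nibble per hop
 *   next byte      : Broadcast[7] | Path_Msg[6] | Msg_Len[5:0]
 *   final byte     : SOMT[7] | EOMT[6] | Seqno[4] | header CRC4[3:0]
 * The CRC covers every nibble except the CRC nibble itself, hence the
 * (idx * 2) - 1 nibble count passed to drm_dp_msg_header_crc4().
 */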

static bool drm_dp_decode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
					   u8 *buf, int buflen, u8 *hdrlen)
{
	u8 crc4;
	u8 len;
	int i;
	u8 idx;
	if (buf[0] == 0)
		return false;
	len = 3;
	len += ((buf[0] & 0xf0) >> 4) / 2;
	if (len > buflen)
		return false;
	crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1);

	if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) {
		DRM_DEBUG_KMS("crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]);
		return false;
	}

	hdr->lct = (buf[0] & 0xf0) >> 4;
	hdr->lcr = (buf[0] & 0xf);
	idx = 1;
	for (i = 0; i < (hdr->lct / 2); i++)
		hdr->rad[i] = buf[idx++];
	hdr->broadcast = (buf[idx] >> 7) & 0x1;
	hdr->path_msg = (buf[idx] >> 6) & 0x1;
	hdr->msg_len = buf[idx] & 0x3f;
	idx++;
	hdr->somt = (buf[idx] >> 7) & 0x1;
	hdr->eomt = (buf[idx] >> 6) & 0x1;
	hdr->seqno = (buf[idx] >> 4) & 0x1;
	idx++;
	*hdrlen = idx;
	return true;
}

static void drm_dp_encode_sideband_req(struct drm_dp_sideband_msg_req_body *req,
				       struct drm_dp_sideband_msg_tx *raw)
{
	int idx = 0;
	int i;
	u8 *buf = raw->msg;
	buf[idx++] = req->req_type & 0x7f;

	switch (req->req_type) {
	case DP_ENUM_PATH_RESOURCES:
		buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
		idx++;
		break;
	case DP_ALLOCATE_PAYLOAD:
		buf[idx] = (req->u.allocate_payload.port_number & 0xf) << 4 |
			(req->u.allocate_payload.number_sdp_streams & 0xf);
		idx++;
		buf[idx] = (req->u.allocate_payload.vcpi & 0x7f);
		idx++;
		buf[idx] = (req->u.allocate_payload.pbn >> 8);
		idx++;
		buf[idx] = (req->u.allocate_payload.pbn & 0xff);
		idx++;
		for (i = 0; i < req->u.allocate_payload.number_sdp_streams / 2; i++) {
			buf[idx] = ((req->u.allocate_payload.sdp_stream_sink[i * 2] & 0xf) << 4) |
				(req->u.allocate_payload.sdp_stream_sink[i * 2 + 1] & 0xf);
			idx++;
		}
		if (req->u.allocate_payload.number_sdp_streams & 1) {
			i = req->u.allocate_payload.number_sdp_streams - 1;
			buf[idx] = (req->u.allocate_payload.sdp_stream_sink[i] & 0xf) << 4;
			idx++;
		}
		break;
	case DP_QUERY_PAYLOAD:
		buf[idx] = (req->u.query_payload.port_number & 0xf) << 4;
		idx++;
		buf[idx] = (req->u.query_payload.vcpi & 0x7f);
		idx++;
		break;
	case DP_REMOTE_DPCD_READ:
		buf[idx] = (req->u.dpcd_read.port_number & 0xf) << 4;
		buf[idx] |= ((req->u.dpcd_read.dpcd_address & 0xf0000) >> 16) & 0xf;
		idx++;
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff00) >> 8;
		idx++;
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff);
		idx++;
		buf[idx] = (req->u.dpcd_read.num_bytes);
		idx++;
		break;

	case DP_REMOTE_DPCD_WRITE:
		buf[idx] = (req->u.dpcd_write.port_number & 0xf) << 4;
		buf[idx] |= ((req->u.dpcd_write.dpcd_address & 0xf0000) >> 16) & 0xf;
		idx++;
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff00) >> 8;
		idx++;
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff);
		idx++;
		buf[idx] = (req->u.dpcd_write.num_bytes);
		idx++;
		memcpy(&buf[idx], req->u.dpcd_write.bytes, req->u.dpcd_write.num_bytes);
		idx += req->u.dpcd_write.num_bytes;
		break;
	case DP_REMOTE_I2C_READ:
		buf[idx] = (req->u.i2c_read.port_number & 0xf) << 4;
		buf[idx] |= (req->u.i2c_read.num_transactions & 0x3);
		idx++;
		for (i = 0; i < (req->u.i2c_read.num_transactions & 0x3); i++) {
			buf[idx] = req->u.i2c_read.transactions[i].i2c_dev_id & 0x7f;
			idx++;
			buf[idx] = req->u.i2c_read.transactions[i].num_bytes;
			idx++;
			memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);
			idx += req->u.i2c_read.transactions[i].num_bytes;

			buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 5;
			buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);
			idx++;
		}
		buf[idx] = (req->u.i2c_read.read_i2c_device_id) & 0x7f;
		idx++;
		buf[idx] = (req->u.i2c_read.num_bytes_read);
		idx++;
		break;

	case DP_REMOTE_I2C_WRITE:
		buf[idx] = (req->u.i2c_write.port_number & 0xf) << 4;
		idx++;
		buf[idx] = (req->u.i2c_write.write_i2c_device_id) & 0x7f;
		idx++;
		buf[idx] = (req->u.i2c_write.num_bytes);
		idx++;
		memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
		idx += req->u.i2c_write.num_bytes;
		break;
	}
	raw->cur_len = idx;
}

static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len)
{
	u8 crc4;
	crc4 = drm_dp_msg_data_crc4(msg, len);
	msg[len] = crc4;
}

static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *rep,
					 struct drm_dp_sideband_msg_tx *raw)
{
	int idx = 0;
	u8 *buf = raw->msg;

	buf[idx++] = (rep->reply_type & 0x1) << 7 | (rep->req_type & 0x7f);

	raw->cur_len = idx;
}

/* this adds a chunk of msg to the builder to get the final msg */
static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
				      u8 *replybuf, u8 replybuflen, bool hdr)
{
	int ret;
	u8 crc4;

	if (hdr) {
		u8 hdrlen;
		struct drm_dp_sideband_msg_hdr recv_hdr;
		ret = drm_dp_decode_sideband_msg_hdr(&recv_hdr, replybuf, replybuflen, &hdrlen);
		if (ret == false) {
			print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16, 1, replybuf, replybuflen, false);
			return false;
		}

		/* get length contained in this portion */
		msg->curchunk_len = recv_hdr.msg_len;
		msg->curchunk_hdrlen = hdrlen;

		/* we have already gotten an SOMT - don't bother parsing */
		if (recv_hdr.somt && msg->have_somt)
			return false;

		if (recv_hdr.somt) {
			memcpy(&msg->initial_hdr, &recv_hdr, sizeof(struct drm_dp_sideband_msg_hdr));
			msg->have_somt = true;
		}
		if (recv_hdr.eomt)
			msg->have_eomt = true;

		/* copy the bytes for the remainder of this header chunk */
		msg->curchunk_idx = min(msg->curchunk_len, (u8)(replybuflen - hdrlen));
		memcpy(&msg->chunk[0], replybuf + hdrlen, msg->curchunk_idx);
	} else {
		memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
		msg->curchunk_idx += replybuflen;
	}

	if (msg->curchunk_idx >= msg->curchunk_len) {
		/* do CRC */
		crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);
		/* copy chunk into bigger msg */
		memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1);
		msg->curlen += msg->curchunk_len - 1;
	}
	return true;
}
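
/*
 * Reassembly is per-chunk: the transaction flagged SOMT carries the header
 * that is latched into initial_hdr, each chunk's final byte is a body CRC
 * that is dropped when the chunk is folded into msg->msg (the
 * curchunk_len - 1 copies above; note the CRC is computed here but never
 * compared), and have_eomt marks the message complete.
 */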

static bool drm_dp_sideband_parse_link_address(struct drm_dp_sideband_msg_rx *raw,
					       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	int i;
	memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16);
	idx += 16;
	repmsg->u.link_addr.nports = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	for (i = 0; i < repmsg->u.link_addr.nports; i++) {
		if (raw->msg[idx] & 0x80)
			repmsg->u.link_addr.ports[i].input_port = 1;

		repmsg->u.link_addr.ports[i].peer_device_type = (raw->msg[idx] >> 4) & 0x7;
		repmsg->u.link_addr.ports[i].port_number = (raw->msg[idx] & 0xf);

		idx++;
		if (idx > raw->curlen)
			goto fail_len;
		repmsg->u.link_addr.ports[i].mcs = (raw->msg[idx] >> 7) & 0x1;
		repmsg->u.link_addr.ports[i].ddps = (raw->msg[idx] >> 6) & 0x1;
		if (repmsg->u.link_addr.ports[i].input_port == 0)
			repmsg->u.link_addr.ports[i].legacy_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
		idx++;
		if (idx > raw->curlen)
			goto fail_len;
		if (repmsg->u.link_addr.ports[i].input_port == 0) {
			repmsg->u.link_addr.ports[i].dpcd_revision = (raw->msg[idx]);
			idx++;
			if (idx > raw->curlen)
				goto fail_len;
			memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16);
			idx += 16;
			if (idx > raw->curlen)
				goto fail_len;
			repmsg->u.link_addr.ports[i].num_sdp_streams = (raw->msg[idx] >> 4) & 0xf;
			repmsg->u.link_addr.ports[i].num_sdp_stream_sinks = (raw->msg[idx] & 0xf);
			idx++;

		}
		if (idx > raw->curlen)
			goto fail_len;
	}

	return true;
fail_len:
	DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx *raw,
						   struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx];
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(repmsg->u.remote_dpcd_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_dpcd_read_ack.num_bytes);
	return true;
fail_len:
	DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_dpcd_write(struct drm_dp_sideband_msg_rx *raw,
						      struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_i2c_read_ack(struct drm_dp_sideband_msg_rx *raw,
						      struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.remote_i2c_read_ack.port_number = (raw->msg[idx] & 0xf);
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.remote_i2c_read_ack.num_bytes = raw->msg[idx];
	idx++;
	/* TODO check */
	memcpy(repmsg->u.remote_i2c_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_i2c_read_ack.num_bytes);
	return true;
fail_len:
	DRM_DEBUG_KMS("remote i2c reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband_msg_rx *raw,
							  struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.path_resources.full_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.path_resources.avail_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("enum resource parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_allocate_payload_ack(struct drm_dp_sideband_msg_rx *raw,
							  struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.allocate_payload.vcpi = raw->msg[idx];
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.allocate_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("allocate payload parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_query_payload_ack(struct drm_dp_sideband_msg_rx *raw,
						    struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.query_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("query payload parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
					struct drm_dp_sideband_msg_reply_body *msg)
{
	memset(msg, 0, sizeof(*msg));
	msg->reply_type = (raw->msg[0] & 0x80) >> 7;
	msg->req_type = (raw->msg[0] & 0x7f);

	if (msg->reply_type) {
		memcpy(msg->u.nak.guid, &raw->msg[1], 16);
		msg->u.nak.reason = raw->msg[17];
		msg->u.nak.nak_data = raw->msg[18];
		return false;
	}

	switch (msg->req_type) {
	case DP_LINK_ADDRESS:
		return drm_dp_sideband_parse_link_address(raw, msg);
	case DP_QUERY_PAYLOAD:
		return drm_dp_sideband_parse_query_payload_ack(raw, msg);
	case DP_REMOTE_DPCD_READ:
		return drm_dp_sideband_parse_remote_dpcd_read(raw, msg);
	case DP_REMOTE_DPCD_WRITE:
		return drm_dp_sideband_parse_remote_dpcd_write(raw, msg);
	case DP_REMOTE_I2C_READ:
		return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg);
	case DP_ENUM_PATH_RESOURCES:
		return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);
	case DP_ALLOCATE_PAYLOAD:
		return drm_dp_sideband_parse_allocate_payload_ack(raw, msg);
	default:
		DRM_ERROR("Got unknown reply 0x%02x\n", msg->req_type);
		return false;
	}
}
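
/*
 * Reply byte 0 packs the ACK/NAK flag in bit 7 above the echoed request
 * type in bits 6:0. A NAK body is always the 16-byte GUID of the nacking
 * device followed by a reason code and one byte of NAK data, which is why
 * the dispatcher above can bail out before any per-request parser runs.
 */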

static bool drm_dp_sideband_parse_connection_status_notify(struct drm_dp_sideband_msg_rx *raw,
							   struct drm_dp_sideband_msg_req_body *msg)
{
	int idx = 1;

	msg->u.conn_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16);
	idx += 16;
	if (idx > raw->curlen)
		goto fail_len;

	msg->u.conn_stat.legacy_device_plug_status = (raw->msg[idx] >> 6) & 0x1;
	msg->u.conn_stat.displayport_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
	msg->u.conn_stat.message_capability_status = (raw->msg[idx] >> 4) & 0x1;
	msg->u.conn_stat.input_port = (raw->msg[idx] >> 3) & 0x1;
	msg->u.conn_stat.peer_device_type = (raw->msg[idx] & 0x7);
	idx++;
	return true;
fail_len:
	DRM_DEBUG_KMS("connection status reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_resource_status_notify(struct drm_dp_sideband_msg_rx *raw,
							   struct drm_dp_sideband_msg_req_body *msg)
{
	int idx = 1;

	msg->u.resource_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16);
	idx += 16;
	if (idx > raw->curlen)
		goto fail_len;

	msg->u.resource_stat.available_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx++;
	return true;
fail_len:
	DRM_DEBUG_KMS("resource status reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_req(struct drm_dp_sideband_msg_rx *raw,
				      struct drm_dp_sideband_msg_req_body *msg)
{
	memset(msg, 0, sizeof(*msg));
	msg->req_type = (raw->msg[0] & 0x7f);

	switch (msg->req_type) {
	case DP_CONNECTION_STATUS_NOTIFY:
		return drm_dp_sideband_parse_connection_status_notify(raw, msg);
	case DP_RESOURCE_STATUS_NOTIFY:
		return drm_dp_sideband_parse_resource_status_notify(raw, msg);
	default:
		DRM_ERROR("Got unknown request 0x%02x\n", msg->req_type);
		return false;
	}
}

static int build_dpcd_write(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_REMOTE_DPCD_WRITE;
	req.u.dpcd_write.port_number = port_num;
	req.u.dpcd_write.dpcd_address = offset;
	req.u.dpcd_write.num_bytes = num_bytes;
	req.u.dpcd_write.bytes = bytes;
	drm_dp_encode_sideband_req(&req, msg);

	return 0;
}

static int build_link_address(struct drm_dp_sideband_msg_tx *msg)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_LINK_ADDRESS;
	drm_dp_encode_sideband_req(&req, msg);
	return 0;
}

static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int port_num)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_ENUM_PATH_RESOURCES;
	req.u.port_num.port_number = port_num;
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
	return 0;
}

static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_num,
				  u8 vcpi, uint16_t pbn)
{
	struct drm_dp_sideband_msg_req_body req;
	memset(&req, 0, sizeof(req));
	req.req_type = DP_ALLOCATE_PAYLOAD;
	req.u.allocate_payload.port_number = port_num;
	req.u.allocate_payload.vcpi = vcpi;
	req.u.allocate_payload.pbn = pbn;
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
	return 0;
}

static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
					struct drm_dp_vcpi *vcpi)
{
	int ret, vcpi_ret;

	mutex_lock(&mgr->payload_lock);
	ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1);
	if (ret > mgr->max_payloads) {
		ret = -EINVAL;
		DRM_DEBUG_KMS("out of payload ids %d\n", ret);
		goto out_unlock;
	}

	vcpi_ret = find_first_zero_bit(&mgr->vcpi_mask, mgr->max_payloads + 1);
	if (vcpi_ret > mgr->max_payloads) {
		ret = -EINVAL;
		DRM_DEBUG_KMS("out of vcpi ids %d\n", ret);
		goto out_unlock;
	}

	set_bit(ret, &mgr->payload_mask);
	set_bit(vcpi_ret, &mgr->vcpi_mask);
	vcpi->vcpi = vcpi_ret + 1;
	mgr->proposed_vcpis[ret - 1] = vcpi;
out_unlock:
	mutex_unlock(&mgr->payload_lock);
	return ret;
}
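
/*
 * Two ID spaces are managed here: payload table slots come from
 * payload_mask, whose bit 0 the manager keeps reserved, so the first free
 * slot is bit 1 and lands in proposed_vcpis[ret - 1]; VCPI numbers come
 * from vcpi_mask and are stored one-biased (vcpi_ret + 1), letting
 * vcpi == 0 mean "unallocated" in drm_dp_mst_put_payload_id() below.
 */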

static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr,
				      int vcpi)
{
	int i;
	if (vcpi == 0)
		return;

	mutex_lock(&mgr->payload_lock);
	DRM_DEBUG_KMS("putting payload %d\n", vcpi);
	clear_bit(vcpi - 1, &mgr->vcpi_mask);

	for (i = 0; i < mgr->max_payloads; i++) {
		if (mgr->proposed_vcpis[i])
			if (mgr->proposed_vcpis[i]->vcpi == vcpi) {
				mgr->proposed_vcpis[i] = NULL;
				clear_bit(i + 1, &mgr->payload_mask);
			}
	}
	mutex_unlock(&mgr->payload_lock);
}

static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
			      struct drm_dp_sideband_msg_tx *txmsg)
{
	bool ret;

	/*
	 * All updates to txmsg->state are protected by mgr->qlock, and the two
	 * cases we check here are terminal states. For those the barriers
	 * provided by the wake_up/wait_event pair are enough.
	 */
	ret = (txmsg->state == DRM_DP_SIDEBAND_TX_RX ||
	       txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT);
	return ret;
}

static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
				    struct drm_dp_sideband_msg_tx *txmsg)
{
	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
	int ret;

	ret = wait_event_timeout(mgr->tx_waitq,
				 check_txmsg_state(mgr, txmsg),
				 (4 * HZ));
	mutex_lock(&mstb->mgr->qlock);
	if (ret > 0) {
		if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) {
			ret = -EIO;
			goto out;
		}
	} else {
		DRM_DEBUG_KMS("timedout msg send %p %d %d\n", txmsg, txmsg->state, txmsg->seqno);

		/* dump some state */
		ret = -EIO;

		/* remove from q */
		if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED ||
		    txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND) {
			list_del(&txmsg->next);
		}

		if (txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
		    txmsg->state == DRM_DP_SIDEBAND_TX_SENT) {
			mstb->tx_slots[txmsg->seqno] = NULL;
		}
	}
out:
	mutex_unlock(&mgr->qlock);

	return ret;
}

static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
{
	struct drm_dp_mst_branch *mstb;

	mstb = kzalloc(sizeof(*mstb), GFP_KERNEL);
	if (!mstb)
		return NULL;

	mstb->lct = lct;
	if (lct > 1)
		memcpy(mstb->rad, rad, lct / 2);
	INIT_LIST_HEAD(&mstb->ports);
	kref_init(&mstb->kref);
	return mstb;
}

static void drm_dp_destroy_mst_branch_device(struct kref *kref)
{
	struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
	struct drm_dp_mst_port *port, *tmp;
	bool wake_tx = false;

	/*
	 * destroy all ports - don't need lock
	 * as there are no more references to the mst branch
	 * device at this point.
	 */
	list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
		list_del(&port->next);
		drm_dp_put_port(port);
	}

	/* drop any tx slots msg */
	mutex_lock(&mstb->mgr->qlock);
	if (mstb->tx_slots[0]) {
		mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
		mstb->tx_slots[0] = NULL;
		wake_tx = true;
	}
	if (mstb->tx_slots[1]) {
		mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
		mstb->tx_slots[1] = NULL;
		wake_tx = true;
	}
	mutex_unlock(&mstb->mgr->qlock);

//   if (wake_tx)
//       wake_up(&mstb->mgr->tx_waitq);
	kfree(mstb);
}

static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb)
{
	kref_put(&mstb->kref, drm_dp_destroy_mst_branch_device);
}

static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt)
{
	struct drm_dp_mst_branch *mstb;

	switch (old_pdt) {
	case DP_PEER_DEVICE_DP_LEGACY_CONV:
	case DP_PEER_DEVICE_SST_SINK:
		/* remove i2c over sideband */
		drm_dp_mst_unregister_i2c_bus(&port->aux);
		break;
	case DP_PEER_DEVICE_MST_BRANCHING:
		mstb = port->mstb;
		port->mstb = NULL;
		drm_dp_put_mst_branch_device(mstb);
		break;
	}
}

static void drm_dp_destroy_port(struct kref *kref)
{
	struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;

	if (!port->input) {
		port->vcpi.num_slots = 0;

		kfree(port->cached_edid);

		/*
		 * The only time we don't have a connector
		 * on an output port is if the connector init
		 * fails.
		 */
		if (port->connector) {
			/* we can't destroy the connector here, as
			 * we might be holding the mode_config.mutex
			 * from an EDID retrieval */

			mutex_lock(&mgr->destroy_connector_lock);
			list_add(&port->next, &mgr->destroy_connector_list);
			mutex_unlock(&mgr->destroy_connector_lock);
//		schedule_work(&mgr->destroy_connector_work);
			return;
		}
		/* no need to clean up vcpi
		 * as if we have no connector we never setup a vcpi */
		drm_dp_port_teardown_pdt(port, port->pdt);
	}
	kfree(port);
}

static void drm_dp_put_port(struct drm_dp_mst_port *port)
{
	kref_put(&port->kref, drm_dp_destroy_port);
}

static struct drm_dp_mst_branch *drm_dp_mst_get_validated_mstb_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_branch *to_find)
{
	struct drm_dp_mst_port *port;
	struct drm_dp_mst_branch *rmstb;
	if (to_find == mstb) {
		kref_get(&mstb->kref);
		return mstb;
	}
	list_for_each_entry(port, &mstb->ports, next) {
		if (port->mstb) {
			rmstb = drm_dp_mst_get_validated_mstb_ref_locked(port->mstb, to_find);
			if (rmstb)
				return rmstb;
		}
	}
	return NULL;
}

static struct drm_dp_mst_branch *drm_dp_get_validated_mstb_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_branch *mstb)
{
	struct drm_dp_mst_branch *rmstb = NULL;
	mutex_lock(&mgr->lock);
	if (mgr->mst_primary)
		rmstb = drm_dp_mst_get_validated_mstb_ref_locked(mgr->mst_primary, mstb);
	mutex_unlock(&mgr->lock);
	return rmstb;
}

static struct drm_dp_mst_port *drm_dp_mst_get_port_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_port *to_find)
{
	struct drm_dp_mst_port *port, *mport;

	list_for_each_entry(port, &mstb->ports, next) {
		if (port == to_find) {
			kref_get(&port->kref);
			return port;
		}
		if (port->mstb) {
			mport = drm_dp_mst_get_port_ref_locked(port->mstb, to_find);
			if (mport)
				return mport;
		}
	}
	return NULL;
}

static struct drm_dp_mst_port *drm_dp_get_validated_port_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
{
	struct drm_dp_mst_port *rport = NULL;
	mutex_lock(&mgr->lock);
	if (mgr->mst_primary)
		rport = drm_dp_mst_get_port_ref_locked(mgr->mst_primary, port);
	mutex_unlock(&mgr->lock);
	return rport;
}

static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num)
{
	struct drm_dp_mst_port *port;

	list_for_each_entry(port, &mstb->ports, next) {
		if (port->port_num == port_num) {
			kref_get(&port->kref);
			return port;
		}
	}

	return NULL;
}

/*
 * calculate a new RAD for this MST branch device:
 * if the parent has an LCT of 2 it has 1 nibble of RAD,
 * if the parent has an LCT of 3 it has 2 nibbles of RAD, and so on.
 */
static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
				 u8 *rad)
{
	int lct = port->parent->lct;
	int shift = 4;
	int idx = (lct - 1) / 2;
	if (lct > 1) {
		memcpy(rad, port->parent->rad, idx + 1);
		shift = (lct % 2) ? 4 : 0;
	} else
		rad[0] = 0;

	rad[idx] |= port->port_num << shift;
	return lct + 1;
}
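
/*
 * Nibble packing, for reference: hop 1 (parent lct 1) lands in the high
 * nibble of rad[0], hop 2 in the low nibble of rad[0], hop 3 in the high
 * nibble of rad[1], and so on; this is the order the decoders in
 * drm_dp_get_mst_branch_device() and build_mst_prop_path() walk with
 * rad[i / 2] and a 4-or-0 shift.
 */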

/*
 * returns true if a link address probe needs to be sent for the new mstb
 */
static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port)
{
	int ret;
	u8 rad[6], lct;
	bool send_link = false;
	switch (port->pdt) {
	case DP_PEER_DEVICE_DP_LEGACY_CONV:
	case DP_PEER_DEVICE_SST_SINK:
		/* add i2c over sideband */
		ret = drm_dp_mst_register_i2c_bus(&port->aux);
		break;
	case DP_PEER_DEVICE_MST_BRANCHING:
		lct = drm_dp_calculate_rad(port, rad);

		port->mstb = drm_dp_add_mst_branch_device(lct, rad);
		port->mstb->mgr = port->mgr;
		port->mstb->port_parent = port;

		send_link = true;
		break;
	}
	return send_link;
}

static void drm_dp_check_port_guid(struct drm_dp_mst_branch *mstb,
				   struct drm_dp_mst_port *port)
{
	int ret;
	if (port->dpcd_rev >= 0x12) {
		port->guid_valid = drm_dp_validate_guid(mstb->mgr, port->guid);
		if (!port->guid_valid) {
			ret = drm_dp_send_dpcd_write(mstb->mgr,
						     port,
						     DP_GUID,
						     16, port->guid);
			port->guid_valid = true;
		}
	}
}

static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
				int pnum,
				char *proppath,
				size_t proppath_size)
{
	int i;
	char temp[8];
	snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
	for (i = 0; i < (mstb->lct - 1); i++) {
		int shift = (i % 2) ? 0 : 4;
		int port_num = (mstb->rad[i / 2] >> shift) & 0xf;
		snprintf(temp, sizeof(temp), "-%d", port_num);
		strlcat(proppath, temp, proppath_size);
	}
	snprintf(temp, sizeof(temp), "-%d", pnum);
	strlcat(proppath, temp, proppath_size);
}
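
/*
 * For example, port 8 behind port 1 of the primary branch of a manager
 * whose first connector id is 10 gets the connector path "mst:10-1-8".
 */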

static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
			    struct device *dev,
			    struct drm_dp_link_addr_reply_port *port_msg)
{
	struct drm_dp_mst_port *port;
	bool ret;
	bool created = false;
	int old_pdt = 0;
	int old_ddps = 0;
	port = drm_dp_get_port(mstb, port_msg->port_number);
	if (!port) {
		port = kzalloc(sizeof(*port), GFP_KERNEL);
		if (!port)
			return;
		kref_init(&port->kref);
		port->parent = mstb;
		port->port_num = port_msg->port_number;
		port->mgr = mstb->mgr;
		port->aux.name = "DPMST";
		port->aux.dev = dev;
		created = true;
	} else {
		old_pdt = port->pdt;
		old_ddps = port->ddps;
	}

	port->pdt = port_msg->peer_device_type;
	port->input = port_msg->input_port;
	port->mcs = port_msg->mcs;
	port->ddps = port_msg->ddps;
	port->ldps = port_msg->legacy_device_plug_status;
	port->dpcd_rev = port_msg->dpcd_revision;
	port->num_sdp_streams = port_msg->num_sdp_streams;
	port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;
	memcpy(port->guid, port_msg->peer_guid, 16);

	/* manage mstb port lists with mgr lock - take a reference
	   for this list */
	if (created) {
		mutex_lock(&mstb->mgr->lock);
		kref_get(&port->kref);
		list_add(&port->next, &mstb->ports);
		mutex_unlock(&mstb->mgr->lock);
	}

	if (old_ddps != port->ddps) {
		if (port->ddps) {
			drm_dp_check_port_guid(mstb, port);
			if (!port->input)
				drm_dp_send_enum_path_resources(mstb->mgr, mstb, port);
		} else {
			port->guid_valid = false;
			port->available_pbn = 0;
		}
	}

	if (old_pdt != port->pdt && !port->input) {
		drm_dp_port_teardown_pdt(port, old_pdt);

		ret = drm_dp_port_setup_pdt(port);
		if (ret == true)
			drm_dp_send_link_address(mstb->mgr, port->mstb);
	}

	if (created && !port->input) {
		char proppath[255];

		build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath));
		port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath);
		if (!port->connector) {
			/* remove it from the port list */
			mutex_lock(&mstb->mgr->lock);
			list_del(&port->next);
			mutex_unlock(&mstb->mgr->lock);
			/* drop port list reference */
			drm_dp_put_port(port);
			goto out;
		}
		if (port->port_num >= DP_MST_LOGICAL_PORT_0) {
			port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
			drm_mode_connector_set_tile_property(port->connector);
		}
		(*mstb->mgr->cbs->register_connector)(port->connector);
	}

out:
	/* put reference to this port */
	drm_dp_put_port(port);
}

static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
			       struct drm_dp_connection_status_notify *conn_stat)
{
	struct drm_dp_mst_port *port;
	int old_pdt;
	int old_ddps;
	bool dowork = false;
	port = drm_dp_get_port(mstb, conn_stat->port_number);
	if (!port)
		return;

	old_ddps = port->ddps;
	old_pdt = port->pdt;
	port->pdt = conn_stat->peer_device_type;
	port->mcs = conn_stat->message_capability_status;
	port->ldps = conn_stat->legacy_device_plug_status;
	port->ddps = conn_stat->displayport_device_plug_status;

	if (old_ddps != port->ddps) {
		if (port->ddps) {
			drm_dp_check_port_guid(mstb, port);
			dowork = true;
		} else {
			port->guid_valid = false;
			port->available_pbn = 0;
		}
	}
	if (old_pdt != port->pdt && !port->input) {
		drm_dp_port_teardown_pdt(port, old_pdt);

		if (drm_dp_port_setup_pdt(port))
			dowork = true;
	}

	drm_dp_put_port(port);
//   if (dowork)
//       queue_work(system_long_wq, &mstb->mgr->work);

}

static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr,
							       u8 lct, u8 *rad)
{
	struct drm_dp_mst_branch *mstb;
	struct drm_dp_mst_port *port;
	int i;
	/* find the port by iterating down */

	mutex_lock(&mgr->lock);
	mstb = mgr->mst_primary;

	for (i = 0; i < lct - 1; i++) {
		int shift = (i % 2) ? 0 : 4;
		int port_num = (rad[i / 2] >> shift) & 0xf;

		list_for_each_entry(port, &mstb->ports, next) {
			if (port->port_num == port_num) {
				mstb = port->mstb;
				if (!mstb) {
					DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x\n", lct, rad[0]);
					goto out;
				}

				break;
			}
		}
	}
	kref_get(&mstb->kref);
out:
	mutex_unlock(&mgr->lock);
	return mstb;
}

static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
					       struct drm_dp_mst_branch *mstb)
{
	struct drm_dp_mst_port *port;
	struct drm_dp_mst_branch *mstb_child;
	if (!mstb->link_address_sent)
		drm_dp_send_link_address(mgr, mstb);

	list_for_each_entry(port, &mstb->ports, next) {
		if (port->input)
			continue;

		if (!port->ddps)
			continue;

		if (!port->available_pbn)
			drm_dp_send_enum_path_resources(mgr, mstb, port);

		if (port->mstb) {
			mstb_child = drm_dp_get_validated_mstb_ref(mgr, port->mstb);
			if (mstb_child) {
				drm_dp_check_and_send_link_address(mgr, mstb_child);
				drm_dp_put_mst_branch_device(mstb_child);
			}
		}
	}
}

static void drm_dp_mst_link_probe_work(struct work_struct *work)
{
	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, work);
	struct drm_dp_mst_branch *mstb;

	mutex_lock(&mgr->lock);
	mstb = mgr->mst_primary;
	if (mstb) {
		kref_get(&mstb->kref);
	}
	mutex_unlock(&mgr->lock);
	if (mstb) {
		drm_dp_check_and_send_link_address(mgr, mstb);
		drm_dp_put_mst_branch_device(mstb);
	}
}

static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
				 u8 *guid)
{
	static u8 zero_guid[16];

	if (!memcmp(guid, zero_guid, 16)) {
		u64 salt = get_jiffies_64();
		memcpy(&guid[0], &salt, sizeof(u64));
		memcpy(&guid[8], &salt, sizeof(u64));
		return false;
	}
	return true;
}
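
/*
 * A branch that reports the all-zero GUID gets a locally generated one
 * seeded from jiffies; the false return tells drm_dp_check_port_guid()
 * above to write the fresh GUID back to the device at DP_GUID.
 */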

#if 0
static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_REMOTE_DPCD_READ;
	req.u.dpcd_read.port_number = port_num;
	req.u.dpcd_read.dpcd_address = offset;
	req.u.dpcd_read.num_bytes = num_bytes;
	drm_dp_encode_sideband_req(&req, msg);

	return 0;
}
#endif

static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
				    bool up, u8 *msg, int len)
{
	int ret;
	int regbase = up ? DP_SIDEBAND_MSG_UP_REP_BASE : DP_SIDEBAND_MSG_DOWN_REQ_BASE;
	int tosend, total, offset;
	int retries = 0;

retry:
	total = len;
	offset = 0;
	do {
		tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total);

		ret = drm_dp_dpcd_write(mgr->aux, regbase + offset,
					&msg[offset],
					tosend);
		if (ret != tosend) {
			if (ret == -EIO && retries < 5) {
				retries++;
				goto retry;
			}
			DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret);

			return -EIO;
		}
		offset += tosend;
		total -= tosend;
	} while (total > 0);
	return 0;
}

static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
				  struct drm_dp_sideband_msg_tx *txmsg)
{
	struct drm_dp_mst_branch *mstb = txmsg->dst;

	/* both msg slots are full */
	if (txmsg->seqno == -1) {
		if (mstb->tx_slots[0] && mstb->tx_slots[1]) {
			DRM_DEBUG_KMS("%s: failed to find slot\n", __func__);
			return -EAGAIN;
		}
		if (mstb->tx_slots[0] == NULL && mstb->tx_slots[1] == NULL) {
			txmsg->seqno = mstb->last_seqno;
			mstb->last_seqno ^= 1;
		} else if (mstb->tx_slots[0] == NULL)
			txmsg->seqno = 0;
		else
			txmsg->seqno = 1;
		mstb->tx_slots[txmsg->seqno] = txmsg;
	}
	hdr->broadcast = 0;
	hdr->path_msg = txmsg->path_msg;
	hdr->lct = mstb->lct;
	hdr->lcr = mstb->lct - 1;
	if (mstb->lct > 1)
		memcpy(hdr->rad, mstb->rad, mstb->lct / 2);
	hdr->seqno = txmsg->seqno;
	return 0;
}
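
/*
 * Each branch device exposes two sideband transaction slots, one per
 * seqno bit: a message arriving here without a slot (seqno == -1) grabs a
 * free one, alternating via last_seqno when both are free, and -EAGAIN
 * defers transmission while both are occupied.
 */
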
/*
 * process a single block of the next message in the sideband queue
 */
static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
				   struct drm_dp_sideband_msg_tx *txmsg,
				   bool up)
{
	u8 chunk[48];
	struct drm_dp_sideband_msg_hdr hdr;
	int len, space, idx, tosend;
	int ret;

	memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr));

	if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED) {
		txmsg->seqno = -1;
		txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND;
	}

	/* make hdr from dst mst - for replies use seqno
	   otherwise assign one */
	ret = set_hdr_from_dst_qlock(&hdr, txmsg);
	if (ret < 0)
		return ret;

	/* amount left to send in this message */
	len = txmsg->cur_len - txmsg->cur_offset;

	/* 48 - sideband msg size - 1 byte for data CRC, x header bytes */
	space = 48 - 1 - drm_dp_calc_sb_hdr_size(&hdr);

	tosend = min(len, space);
	if (len == txmsg->cur_len)
		hdr.somt = 1;
	if (space >= len)
		hdr.eomt = 1;

	hdr.msg_len = tosend + 1;
	drm_dp_encode_sideband_msg_hdr(&hdr, chunk, &idx);
	memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend);
	/* add crc at end */
	drm_dp_crc_sideband_chunk_req(&chunk[idx], tosend);
	idx += tosend + 1;

	ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
	if (ret) {
		DRM_DEBUG_KMS("sideband msg failed to send\n");
		return ret;
	}

	txmsg->cur_offset += tosend;
	if (txmsg->cur_offset == txmsg->cur_len) {
		txmsg->state = DRM_DP_SIDEBAND_TX_SENT;
		return 1;
	}
	return 0;
}
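
/*
 * Each DPCD write window is 48 bytes, so a chunk carries the header
 * (3 + lct/2 bytes), up to 'space' payload bytes and a final body CRC;
 * SOMT is set on a message's first chunk and EOMT on the chunk whose
 * remainder fits, matching what drm_dp_sideband_msg_build() expects on
 * the receive side.
 */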

static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
{
	struct drm_dp_sideband_msg_tx *txmsg;
	int ret;

	WARN_ON(!mutex_is_locked(&mgr->qlock));

	/* construct a chunk from the first msg in the tx_msg queue */
	if (list_empty(&mgr->tx_msg_downq)) {
		mgr->tx_down_in_progress = false;
		return;
	}
	mgr->tx_down_in_progress = true;

	txmsg = list_first_entry(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next);
	ret = process_single_tx_qlock(mgr, txmsg, false);
	if (ret == 1) {
		/* txmsg is sent it should be in the slots now */
		list_del(&txmsg->next);
	} else if (ret) {
		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
		list_del(&txmsg->next);
		if (txmsg->seqno != -1)
			txmsg->dst->tx_slots[txmsg->seqno] = NULL;
		txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
//       wake_up(&mgr->tx_waitq);
	}
	if (list_empty(&mgr->tx_msg_downq)) {
		mgr->tx_down_in_progress = false;
		return;
	}
}

/* called holding qlock */
static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
{
	struct drm_dp_sideband_msg_tx *txmsg;
	int ret;

	/* construct a chunk from the first msg in the tx_msg queue */
	if (list_empty(&mgr->tx_msg_upq)) {
		mgr->tx_up_in_progress = false;
		return;
	}

	txmsg = list_first_entry(&mgr->tx_msg_upq, struct drm_dp_sideband_msg_tx, next);
	ret = process_single_tx_qlock(mgr, txmsg, true);
	if (ret == 1) {
		/* up txmsgs aren't put in slots - so free after we send it */
		list_del(&txmsg->next);
		kfree(txmsg);
	} else if (ret)
		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
	mgr->tx_up_in_progress = true;
}

static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_sideband_msg_tx *txmsg)
{
	mutex_lock(&mgr->qlock);
	list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
	if (!mgr->tx_down_in_progress)
		process_single_down_tx_qlock(mgr);
	mutex_unlock(&mgr->qlock);
}

static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
				     struct drm_dp_mst_branch *mstb)
{
	int len;
	struct drm_dp_sideband_msg_tx *txmsg;
	int ret;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg)
		return;

	txmsg->dst = mstb;
	len = build_link_address(txmsg);

	mstb->link_address_sent = true;
	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
		int i;

		if (txmsg->reply.reply_type == 1)
			DRM_DEBUG_KMS("link address nak received\n");
		else {
			DRM_DEBUG_KMS("link address reply: %d\n", txmsg->reply.u.link_addr.nports);
			for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
				DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n", i,
				       txmsg->reply.u.link_addr.ports[i].input_port,
				       txmsg->reply.u.link_addr.ports[i].peer_device_type,
				       txmsg->reply.u.link_addr.ports[i].port_number,
				       txmsg->reply.u.link_addr.ports[i].dpcd_revision,
				       txmsg->reply.u.link_addr.ports[i].mcs,
				       txmsg->reply.u.link_addr.ports[i].ddps,
				       txmsg->reply.u.link_addr.ports[i].legacy_device_plug_status,
				       txmsg->reply.u.link_addr.ports[i].num_sdp_streams,
				       txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks);
			}
			for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
				drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]);
			}
			(*mgr->cbs->hotplug)(mgr);
		}
	} else {
		mstb->link_address_sent = false;
		DRM_DEBUG_KMS("link address failed %d\n", ret);
	}

	kfree(txmsg);
}

static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
					   struct drm_dp_mst_branch *mstb,
					   struct drm_dp_mst_port *port)
{
	int len;
	struct drm_dp_sideband_msg_tx *txmsg;
	int ret;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg)
		return -ENOMEM;

	txmsg->dst = mstb;
	len = build_enum_path_resources(txmsg, port->port_num);

	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
		if (txmsg->reply.reply_type == 1)
			DRM_DEBUG_KMS("enum path resources nak received\n");
		else {
			if (port->port_num != txmsg->reply.u.path_resources.port_number)
				DRM_ERROR("got incorrect port in response\n");
			DRM_DEBUG_KMS("enum path resources %d: %d %d\n", txmsg->reply.u.path_resources.port_number, txmsg->reply.u.path_resources.full_payload_bw_number,
			       txmsg->reply.u.path_resources.avail_payload_bw_number);
			port->available_pbn = txmsg->reply.u.path_resources.avail_payload_bw_number;
		}
	}

	kfree(txmsg);
	return 0;
}

static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
				   struct drm_dp_mst_port *port,
				   int id,
				   int pbn)
{
	struct drm_dp_sideband_msg_tx *txmsg;
	struct drm_dp_mst_branch *mstb;
	int len, ret;

	mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
	if (!mstb)
		return -EINVAL;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg) {
		ret = -ENOMEM;
		goto fail_put;
	}

	txmsg->dst = mstb;
	len = build_allocate_payload(txmsg, port->port_num,
				     id,
				     pbn);

	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
		if (txmsg->reply.reply_type == 1) {
			ret = -EINVAL;
		} else
			ret = 0;
	}
	kfree(txmsg);
fail_put:
	drm_dp_put_mst_branch_device(mstb);
	return ret;
}

static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
				       int id,
				       struct drm_dp_payload *payload)
{
	int ret;

	ret = drm_dp_dpcd_write_payload(mgr, id, payload);
	if (ret < 0) {
		payload->payload_state = 0;
		return ret;
	}
	payload->payload_state = DP_PAYLOAD_LOCAL;
	return 0;
}

static int drm_dp_create_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
				       struct drm_dp_mst_port *port,
				       int id,
				       struct drm_dp_payload *payload)
{
	int ret;
	ret = drm_dp_payload_send_msg(mgr, port, id, port->vcpi.pbn);
	if (ret < 0)
		return ret;
	payload->payload_state = DP_PAYLOAD_REMOTE;
	return ret;
}

static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
					struct drm_dp_mst_port *port,
					int id,
					struct drm_dp_payload *payload)
{
	DRM_DEBUG_KMS("\n");
	/* it's okay for these to fail */
	if (port) {
		drm_dp_payload_send_msg(mgr, port, id, 0);
	}

	drm_dp_dpcd_write_payload(mgr, id, payload);
	payload->payload_state = DP_PAYLOAD_DELETE_LOCAL;
	return 0;
}

static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
					int id,
					struct drm_dp_payload *payload)
{
	payload->payload_state = 0;
	return 0;
}

/**
 * drm_dp_update_payload_part1() - Execute payload update part 1
 * @mgr: manager to use.
 *
 * This iterates over all proposed virtual channels, and tries to
 * allocate space in the link for them. For 0->slots transitions,
 * this step just writes the VCPI to the MST device. For slots->0
 * transitions, this writes the updated VCPIs and removes the
 * remote VC payloads.
 *
 * After calling this, the driver should generate ACT and payload
 * packets.
 */
int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
{
	int i, j;
	int cur_slots = 1;
	struct drm_dp_payload req_payload;
	struct drm_dp_mst_port *port;

	mutex_lock(&mgr->payload_lock);
	for (i = 0; i < mgr->max_payloads; i++) {
		/* solve the current payloads - compare to the hw ones
		   - update the hw view */
		req_payload.start_slot = cur_slots;
		if (mgr->proposed_vcpis[i]) {
			port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
			req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots;
		} else {
			port = NULL;
			req_payload.num_slots = 0;
		}

		if (mgr->payloads[i].start_slot != req_payload.start_slot) {
			mgr->payloads[i].start_slot = req_payload.start_slot;
		}
		/* work out what is required to happen with this payload */
		if (mgr->payloads[i].num_slots != req_payload.num_slots) {

			/* need to push an update for this payload */
			if (req_payload.num_slots) {
				drm_dp_create_payload_step1(mgr, mgr->proposed_vcpis[i]->vcpi, &req_payload);
				mgr->payloads[i].num_slots = req_payload.num_slots;
			} else if (mgr->payloads[i].num_slots) {
				mgr->payloads[i].num_slots = 0;
				drm_dp_destroy_payload_step1(mgr, port, port->vcpi.vcpi, &mgr->payloads[i]);
				req_payload.payload_state = mgr->payloads[i].payload_state;
				mgr->payloads[i].start_slot = 0;
			}
			mgr->payloads[i].payload_state = req_payload.payload_state;
		}
		cur_slots += req_payload.num_slots;
	}

	for (i = 0; i < mgr->max_payloads; i++) {
		if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
			DRM_DEBUG_KMS("removing payload %d\n", i);
			for (j = i; j < mgr->max_payloads - 1; j++) {
				memcpy(&mgr->payloads[j], &mgr->payloads[j + 1], sizeof(struct drm_dp_payload));
				mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1];
				if (mgr->proposed_vcpis[j] && mgr->proposed_vcpis[j]->num_slots) {
					set_bit(j + 1, &mgr->payload_mask);
				} else {
					clear_bit(j + 1, &mgr->payload_mask);
				}
			}
			memset(&mgr->payloads[mgr->max_payloads - 1], 0, sizeof(struct drm_dp_payload));
			mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL;
			clear_bit(mgr->max_payloads, &mgr->payload_mask);

		}
	}
	mutex_unlock(&mgr->payload_lock);

	return 0;
}
EXPORT_SYMBOL(drm_dp_update_payload_part1);
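
/*
 * A minimal sketch of the intended call order (illustration only, not
 * part of this file): part 1 updates the local payload table, the driver
 * then triggers the allocation change and waits for ACT completion via
 * drm_dp_check_act_status(), and part 2 finishes the remote allocation.
 */
#if 0
static void example_commit_mst_payloads(struct drm_dp_mst_topology_mgr *mgr)
{
	drm_dp_update_payload_part1(mgr);
	/* the driver programs its own HW payload table / ACT trigger here */
	drm_dp_check_act_status(mgr);
	drm_dp_update_payload_part2(mgr);
}
#endif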

/**
 * drm_dp_update_payload_part2() - Execute payload update part 2
 * @mgr: manager to use.
 *
 * This iterates over all proposed virtual channels, and tries to
 * allocate space in the link for them. For 0->slots transitions,
 * this step writes the remote VC payload commands. For slots->0
 * this just resets some internal state.
 */
int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr)
{
	struct drm_dp_mst_port *port;
	int i;
	int ret = 0;
	mutex_lock(&mgr->payload_lock);
	for (i = 0; i < mgr->max_payloads; i++) {

		if (!mgr->proposed_vcpis[i])
			continue;

		port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);

		DRM_DEBUG_KMS("payload %d %d\n", i, mgr->payloads[i].payload_state);
		if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) {
			ret = drm_dp_create_payload_step2(mgr, port, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
		} else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
			ret = drm_dp_destroy_payload_step2(mgr, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
		}
		if (ret) {
			mutex_unlock(&mgr->payload_lock);
			return ret;
		}
	}
	mutex_unlock(&mgr->payload_lock);
	return 0;
}
EXPORT_SYMBOL(drm_dp_update_payload_part2);
1770
 
1771
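/*
 * A minimal sketch (not part of the original source) of how a driver is
 * expected to sequence the two payload-update halves around the ACT
 * trigger, per the kernel-doc above. The function name and the point at
 * which the driver tells its own hardware to send ACT are hypothetical;
 * the three drm_dp_* calls are real helpers from this file
 * (drm_dp_check_act_status() is defined further below).
 */
#if 0 /* illustrative only */
static void example_mst_commit_payloads(struct drm_dp_mst_topology_mgr *mgr)
{
	/* step 1: write VCPI/payload table changes to the branch device */
	drm_dp_update_payload_part1(mgr);

	/* the source hardware would be told to send the ACT sequence here */

	/* wait for the sink to report ACT handled */
	drm_dp_check_act_status(mgr);

	/* step 2: send the remote payload (de)allocation messages */
	drm_dp_update_payload_part2(mgr);
}
#endif
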
#if 0 /* unused as of yet */
static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port,
				 int offset, int size)
{
	int len;
	struct drm_dp_sideband_msg_tx *txmsg;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg)
		return -ENOMEM;

	len = build_dpcd_read(txmsg, port->port_num, 0, 8);
	txmsg->dst = port->parent;

	drm_dp_queue_down_tx(mgr, txmsg);

	return 0;
}
#endif

static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
				  struct drm_dp_mst_port *port,
				  int offset, int size, u8 *bytes)
{
	int len;
	int ret;
	struct drm_dp_sideband_msg_tx *txmsg;
	struct drm_dp_mst_branch *mstb;

	mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
	if (!mstb)
		return -EINVAL;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg) {
		ret = -ENOMEM;
		goto fail_put;
	}

	len = build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
	txmsg->dst = mstb;

	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
		if (txmsg->reply.reply_type == 1) /* NAK from the sink */
			ret = -EINVAL;
		else
			ret = 0;
	}
	kfree(txmsg);
fail_put:
	drm_dp_put_mst_branch_device(mstb);
	return ret;
}

static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type)
{
	struct drm_dp_sideband_msg_reply_body reply;

	reply.reply_type = 1;
	reply.req_type = req_type;
	drm_dp_encode_sideband_reply(&reply, msg);
	return 0;
}

static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
				    struct drm_dp_mst_branch *mstb,
				    int req_type, int seqno, bool broadcast)
{
	struct drm_dp_sideband_msg_tx *txmsg;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg)
		return -ENOMEM;

	txmsg->dst = mstb;
	txmsg->seqno = seqno;
	drm_dp_encode_up_ack_reply(txmsg, req_type);

	mutex_lock(&mgr->qlock);
	list_add_tail(&txmsg->next, &mgr->tx_msg_upq);
	if (!mgr->tx_up_in_progress) {
		process_single_up_tx_qlock(mgr);
	}
	mutex_unlock(&mgr->qlock);
	return 0;
}

static bool drm_dp_get_vc_payload_bw(int dp_link_bw,
				     int dp_link_count,
				     int *out)
{
	switch (dp_link_bw) {
	default:
		DRM_DEBUG_KMS("invalid link bandwidth in DPCD: %x (link count: %d)\n",
			      dp_link_bw, dp_link_count);
		return false;

	case DP_LINK_BW_1_62:
		*out = 3 * dp_link_count;
		break;
	case DP_LINK_BW_2_7:
		*out = 5 * dp_link_count;
		break;
	case DP_LINK_BW_5_4:
		*out = 10 * dp_link_count;
		break;
	}
	return true;
}

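/*
 * Worked example (not in the original source): for a DP_LINK_BW_2_7 link
 * trained at 4 lanes, the function above yields *out = 5 * 4 = 20, i.e.
 * each MTP time slot on that link can carry 20 PBN. The caller stores
 * this as mgr->pbn_div and uses it below to convert a mode's PBN into a
 * time-slot count.
 */
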
/**
 * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager
 * @mgr: manager to set state for
 * @mst_state: true to enable MST on this connector - false to disable.
 *
 * This is called by the driver when it detects an MST capable device plugged
 * into a DP MST capable port, or when a DP MST capable device is unplugged.
 */
int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
{
	int ret = 0;
	struct drm_dp_mst_branch *mstb = NULL;

	mutex_lock(&mgr->lock);
	if (mst_state == mgr->mst_state)
		goto out_unlock;

	mgr->mst_state = mst_state;
	/* set the device into MST mode */
	if (mst_state) {
		WARN_ON(mgr->mst_primary);

		/* get dpcd info */
		ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
		if (ret != DP_RECEIVER_CAP_SIZE) {
			DRM_DEBUG_KMS("failed to read DPCD\n");
			goto out_unlock;
		}

		if (!drm_dp_get_vc_payload_bw(mgr->dpcd[1],
					      mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK,
					      &mgr->pbn_div)) {
			ret = -EINVAL;
			goto out_unlock;
		}

		mgr->total_pbn = 2560;
		mgr->total_slots = DIV_ROUND_UP(mgr->total_pbn, mgr->pbn_div);
		mgr->avail_slots = mgr->total_slots;

		/* add initial branch device at LCT 1 */
		mstb = drm_dp_add_mst_branch_device(1, NULL);
		if (mstb == NULL) {
			ret = -ENOMEM;
			goto out_unlock;
		}
		mstb->mgr = mgr;

		/* give this the main reference */
		mgr->mst_primary = mstb;
		kref_get(&mgr->mst_primary->kref);

		{
			struct drm_dp_payload reset_pay;
			reset_pay.start_slot = 0;
			reset_pay.num_slots = 0x3f;
			drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
		}

		ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
					 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
		if (ret < 0) {
			goto out_unlock;
		}

		/* sort out guid */
		ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, mgr->guid, 16);
		if (ret != 16) {
			DRM_DEBUG_KMS("failed to read DP GUID %d\n", ret);
			goto out_unlock;
		}

		mgr->guid_valid = drm_dp_validate_guid(mgr, mgr->guid);
		if (!mgr->guid_valid) {
			ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, mgr->guid, 16);
			mgr->guid_valid = true;
		}

//       queue_work(system_long_wq, &mgr->work);

		ret = 0;
	} else {
		/* disable MST on the device */
		mstb = mgr->mst_primary;
		mgr->mst_primary = NULL;
		/* this can fail if the device is gone */
		drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
		ret = 0;
		memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload));
		mgr->payload_mask = 0;
		set_bit(0, &mgr->payload_mask);
		mgr->vcpi_mask = 0;
	}

out_unlock:
	mutex_unlock(&mgr->lock);
	if (mstb)
		drm_dp_put_mst_branch_device(mstb);
	return ret;
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst);

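/*
 * A hedged usage sketch (not in the original source): a driver's
 * long-pulse hotplug path might flip MST mode based on its own sink
 * probing. The surrounding function and the sink_is_mst_capable flag are
 * hypothetical; drm_dp_mst_topology_mgr_set_mst() is the real entry point.
 */
#if 0 /* illustrative only */
static void example_handle_long_hpd(struct drm_dp_mst_topology_mgr *mgr,
				    bool sink_is_mst_capable)
{
	/* enable on plug of an MST branch device, disable on unplug */
	drm_dp_mst_topology_mgr_set_mst(mgr, sink_is_mst_capable);
}
#endif
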
/**
 * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
 * @mgr: manager to suspend
 *
 * This function tells the MST device that we can't handle UP messages
 * anymore. This should stop it from sending any since we are suspended.
 */
void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
{
	mutex_lock(&mgr->lock);
	drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
			   DP_MST_EN | DP_UPSTREAM_IS_SRC);
	mutex_unlock(&mgr->lock);
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);

/**
 * drm_dp_mst_topology_mgr_resume() - resume the MST manager
 * @mgr: manager to resume
 *
 * This will fetch DPCD and see if the device is still there;
 * if it is, it will rewrite the MSTM control bits and return.
 *
 * If the device fails, this returns -1, and the driver should do
 * a full MST reprobe, in case we were undocked.
 */
int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
{
	int ret = 0;

	mutex_lock(&mgr->lock);

	if (mgr->mst_primary) {
		int sret;
		sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
		if (sret != DP_RECEIVER_CAP_SIZE) {
			DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
			ret = -1;
			goto out_unlock;
		}

		ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
					 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
		if (ret < 0) {
			DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n");
			ret = -1;
			goto out_unlock;
		}
		ret = 0;
	} else
		ret = -1;

out_unlock:
	mutex_unlock(&mgr->lock);
	return ret;
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);

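/*
 * A minimal resume-path sketch (not in the original source): on a failed
 * resume the driver is expected to tear MST down and reprobe, per the
 * kernel-doc above. The fallback policy shown here is a hypothetical
 * driver choice; the two drm_dp_mst_* calls are the real helpers.
 */
#if 0 /* illustrative only */
static void example_mst_resume(struct drm_dp_mst_topology_mgr *mgr)
{
	if (drm_dp_mst_topology_mgr_resume(mgr) < 0) {
		/* device vanished (undocked?) - disable MST and let a
		 * fresh hotplug/probe rebuild the topology from scratch */
		drm_dp_mst_topology_mgr_set_mst(mgr, false);
	}
}
#endif
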
static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
{
	int len;
	u8 replyblock[32];
	int replylen, origlen, curreply;
	int ret;
	struct drm_dp_sideband_msg_rx *msg;
	int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE : DP_SIDEBAND_MSG_DOWN_REP_BASE;
	msg = up ? &mgr->up_req_recv : &mgr->down_rep_recv;

	len = min(mgr->max_dpcd_transaction_bytes, 16);
	ret = drm_dp_dpcd_read(mgr->aux, basereg,
			       replyblock, len);
	if (ret != len) {
		DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret);
		return;
	}
	ret = drm_dp_sideband_msg_build(msg, replyblock, len, true);
	if (!ret) {
		DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
		return;
	}
	replylen = msg->curchunk_len + msg->curchunk_hdrlen;

	origlen = replylen;
	replylen -= len;
	curreply = len;
	while (replylen > 0) {
		len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
		ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
				       replyblock, len);
		if (ret != len) {
			DRM_DEBUG_KMS("failed to read a chunk\n");
		}
		ret = drm_dp_sideband_msg_build(msg, replyblock, len, false);
		if (!ret)
			DRM_DEBUG_KMS("failed to build sideband msg\n");
		curreply += len;
		replylen -= len;
	}
}

static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
{
	int ret = 0;

	drm_dp_get_one_sb_msg(mgr, false);

	if (mgr->down_rep_recv.have_eomt) {
		struct drm_dp_sideband_msg_tx *txmsg;
		struct drm_dp_mst_branch *mstb;
		int slot = -1;
		mstb = drm_dp_get_mst_branch_device(mgr,
						    mgr->down_rep_recv.initial_hdr.lct,
						    mgr->down_rep_recv.initial_hdr.rad);

		if (!mstb) {
			DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->down_rep_recv.initial_hdr.lct);
			memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
			return 0;
		}

		/* find the message */
		slot = mgr->down_rep_recv.initial_hdr.seqno;
		mutex_lock(&mgr->qlock);
		txmsg = mstb->tx_slots[slot];
		/* remove from slots */
		mutex_unlock(&mgr->qlock);

		if (!txmsg) {
			DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n",
				      mstb,
				      mgr->down_rep_recv.initial_hdr.seqno,
				      mgr->down_rep_recv.initial_hdr.lct,
				      mgr->down_rep_recv.initial_hdr.rad[0],
				      mgr->down_rep_recv.msg[0]);
			drm_dp_put_mst_branch_device(mstb);
			memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
			return 0;
		}

		drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply);
		if (txmsg->reply.reply_type == 1) {
			DRM_DEBUG_KMS("Got NAK reply: req 0x%02x, reason 0x%02x, nak data 0x%02x\n", txmsg->reply.req_type, txmsg->reply.u.nak.reason, txmsg->reply.u.nak.nak_data);
		}

		memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
		drm_dp_put_mst_branch_device(mstb);

		mutex_lock(&mgr->qlock);
		txmsg->state = DRM_DP_SIDEBAND_TX_RX;
		mstb->tx_slots[slot] = NULL;
		mutex_unlock(&mgr->qlock);

//       wake_up(&mgr->tx_waitq);
	}
	return ret;
}

static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
{
	int ret = 0;
	drm_dp_get_one_sb_msg(mgr, true);

	if (mgr->up_req_recv.have_eomt) {
		struct drm_dp_sideband_msg_req_body msg;
		struct drm_dp_mst_branch *mstb;
		bool seqno;
		mstb = drm_dp_get_mst_branch_device(mgr,
						    mgr->up_req_recv.initial_hdr.lct,
						    mgr->up_req_recv.initial_hdr.rad);
		if (!mstb) {
			DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
			memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
			return 0;
		}

		seqno = mgr->up_req_recv.initial_hdr.seqno;
		drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg);

		if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
			drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false);
			drm_dp_update_port(mstb, &msg.u.conn_stat);
			DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n",
				      msg.u.conn_stat.port_number,
				      msg.u.conn_stat.legacy_device_plug_status,
				      msg.u.conn_stat.displayport_device_plug_status,
				      msg.u.conn_stat.message_capability_status,
				      msg.u.conn_stat.input_port,
				      msg.u.conn_stat.peer_device_type);
			(*mgr->cbs->hotplug)(mgr);

		} else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
			drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false);
			DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n",
				      msg.u.resource_stat.port_number,
				      msg.u.resource_stat.available_pbn);
		}

		drm_dp_put_mst_branch_device(mstb);
		memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
	}
	return ret;
}

/**
 * drm_dp_mst_hpd_irq() - MST hotplug IRQ notify
 * @mgr: manager to notify irq for.
 * @esi: 4 bytes from SINK_COUNT_ESI
 * @handled: whether the hpd interrupt was consumed or not
 *
 * This should be called from the driver when it detects a short IRQ,
 * along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The
 * topology manager will process the sideband messages received as a result
 * of this.
 */
int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled)
{
	int ret = 0;
	int sc;
	*handled = false;
	sc = esi[0] & 0x3f;

	if (sc != mgr->sink_count) {
		mgr->sink_count = sc;
		*handled = true;
	}

	if (esi[1] & DP_DOWN_REP_MSG_RDY) {
		ret = drm_dp_mst_handle_down_rep(mgr);
		*handled = true;
	}

	if (esi[1] & DP_UP_REQ_MSG_RDY) {
		ret |= drm_dp_mst_handle_up_req(mgr);
		*handled = true;
	}

	drm_dp_mst_kick_tx(mgr);
	return ret;
}
EXPORT_SYMBOL(drm_dp_mst_hpd_irq);

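/*
 * A hedged sketch (not in the original source) of a driver's short-HPD
 * path, per the kernel-doc above: read the ESI block starting at
 * DP_SINK_COUNT_ESI, hand it to drm_dp_mst_hpd_irq(), then ack the
 * serviced IRQ bits back to the sink. The ack policy (writing ESI0 back)
 * is an assumption modelled on typical drivers, not part of this file's
 * API.
 */
#if 0 /* illustrative only */
static void example_short_hpd(struct drm_dp_mst_topology_mgr *mgr)
{
	u8 esi[4];
	bool handled = false;

	/* esi[0] = sink count, esi[1] = DEVICE_SERVICE_IRQ_VECTOR_ESI0 */
	if (drm_dp_dpcd_read(mgr->aux, DP_SINK_COUNT_ESI, esi, 4) != 4)
		return;

	drm_dp_mst_hpd_irq(mgr, esi, &handled);

	if (handled)
		/* ack the DOWN_REP/UP_REQ bits we just serviced */
		drm_dp_dpcd_writeb(mgr->aux, DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0, esi[1]);
}
#endif
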
/**
 * drm_dp_mst_detect_port() - get connection status for an MST port
 * @connector: connector to use for EDID caching on logical ports
 * @mgr: manager for this port
 * @port: unverified pointer to a port
 *
 * This returns the current connection state for a port. It validates that
 * the port pointer still exists so the caller doesn't require a reference.
 */
enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector,
						 struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
{
	enum drm_connector_status status = connector_status_disconnected;

	/* we need to search for the port in the mgr in case it's gone */
	port = drm_dp_get_validated_port_ref(mgr, port);
	if (!port)
		return connector_status_disconnected;

	if (!port->ddps)
		goto out;

	switch (port->pdt) {
	case DP_PEER_DEVICE_NONE:
	case DP_PEER_DEVICE_MST_BRANCHING:
		break;

	case DP_PEER_DEVICE_SST_SINK:
		status = connector_status_connected;
		/* for logical ports - cache the EDID */
		if (port->port_num >= 8 && !port->cached_edid) {
			port->cached_edid = drm_get_edid(connector, &port->aux.ddc);
		}
		break;
	case DP_PEER_DEVICE_DP_LEGACY_CONV:
		if (port->ldps)
			status = connector_status_connected;
		break;
	}
out:
	drm_dp_put_port(port);
	return status;
}
EXPORT_SYMBOL(drm_dp_mst_detect_port);

/**
 * drm_dp_mst_get_edid() - get EDID for an MST port
 * @connector: toplevel connector to get EDID for
 * @mgr: manager for this port
 * @port: unverified pointer to a port.
 *
 * This returns an EDID for the port connected to a connector.
 * It validates the pointer still exists so the caller doesn't require a
 * reference.
 */
struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
{
	struct edid *edid = NULL;

	/* we need to search for the port in the mgr in case it's gone */
	port = drm_dp_get_validated_port_ref(mgr, port);
	if (!port)
		return NULL;

	if (port->cached_edid)
		edid = drm_edid_duplicate(port->cached_edid);
	else {
		edid = drm_get_edid(connector, &port->aux.ddc);
		drm_mode_connector_set_tile_property(connector);
	}
	drm_dp_put_port(port);
	return edid;
}
EXPORT_SYMBOL(drm_dp_mst_get_edid);

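/*
 * A hedged sketch (not in the original source) of the typical ->get_modes
 * consumer for the helper above: fetch the (possibly cached) EDID, feed
 * it to the connector, and free it. drm_mode_connector_update_edid_property()
 * and drm_add_edid_modes() are standard DRM helpers; the surrounding
 * function is hypothetical.
 */
#if 0 /* illustrative only */
static int example_mst_get_modes(struct drm_connector *connector,
				 struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port)
{
	struct edid *edid;
	int count = 0;

	edid = drm_dp_mst_get_edid(connector, mgr, port);
	if (edid) {
		drm_mode_connector_update_edid_property(connector, edid);
		count = drm_add_edid_modes(connector, edid);
		kfree(edid);
	}
	return count;
}
#endif
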
/**
 * drm_dp_find_vcpi_slots() - find slots for this PBN value
 * @mgr: manager to use
 * @pbn: payload bandwidth to convert into slots.
 */
int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
			   int pbn)
{
	int num_slots;

	num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);

	if (num_slots > mgr->avail_slots)
		return -ENOSPC;
	return num_slots;
}
EXPORT_SYMBOL(drm_dp_find_vcpi_slots);

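/*
 * Worked example (not in the original source): with mgr->pbn_div = 20
 * (2.7 Gbps x 4 lanes, see drm_dp_get_vc_payload_bw() above), a mode
 * needing 689 PBN maps to DIV_ROUND_UP(689, 20) = 35 time slots.
 * -ENOSPC is returned only when the rounded-up slot count exceeds
 * mgr->avail_slots.
 */
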
static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
			    struct drm_dp_vcpi *vcpi, int pbn)
{
	int num_slots;
	int ret;

	num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);

	if (num_slots > mgr->avail_slots)
		return -ENOSPC;

	vcpi->pbn = pbn;
	vcpi->aligned_pbn = num_slots * mgr->pbn_div;
	vcpi->num_slots = num_slots;

	ret = drm_dp_mst_assign_payload_id(mgr, vcpi);
	if (ret < 0)
		return ret;
	return 0;
}

/**
 * drm_dp_mst_allocate_vcpi() - Allocate a virtual channel
 * @mgr: manager for this port
 * @port: port to allocate a virtual channel for.
 * @pbn: payload bandwidth number to request
 * @slots: returned number of slots for this PBN.
 */
bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, int pbn, int *slots)
{
	int ret;

	port = drm_dp_get_validated_port_ref(mgr, port);
	if (!port)
		return false;

	if (port->vcpi.vcpi > 0) {
		DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", port->vcpi.vcpi, port->vcpi.pbn, pbn);
		if (pbn == port->vcpi.pbn) {
			*slots = port->vcpi.num_slots;
			/* drop the reference taken by the validation above */
			drm_dp_put_port(port);
			return true;
		}
	}

	ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn);
	if (ret) {
		DRM_DEBUG_KMS("failed to init vcpi %d %d %d\n", DIV_ROUND_UP(pbn, mgr->pbn_div), mgr->avail_slots, ret);
		goto out;
	}
	DRM_DEBUG_KMS("initing vcpi for %d %d\n", pbn, port->vcpi.num_slots);
	*slots = port->vcpi.num_slots;

	drm_dp_put_port(port);
	return true;
out:
	/* drop the reference taken by the validation above */
	drm_dp_put_port(port);
	return false;
}
EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi);

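/*
 * A minimal sketch (not in the original source) of how the pieces above
 * combine at mode-set time: convert the mode to PBN, check the slot
 * budget, then allocate the VCPI. The function name and error policy are
 * hypothetical; drm_dp_calc_pbn_mode() is defined later in this file.
 */
#if 0 /* illustrative only */
static int example_mst_compute_config(struct drm_dp_mst_topology_mgr *mgr,
				      struct drm_dp_mst_port *port,
				      int clock_khz, int bpp)
{
	int pbn = drm_dp_calc_pbn_mode(clock_khz, bpp);
	int slots;

	slots = drm_dp_find_vcpi_slots(mgr, pbn);
	if (slots < 0)
		return slots;	/* -ENOSPC: link is out of bandwidth */

	if (!drm_dp_mst_allocate_vcpi(mgr, port, pbn, &slots))
		return -EINVAL;

	return slots;
}
#endif
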
int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
{
	int slots = 0;
	port = drm_dp_get_validated_port_ref(mgr, port);
	if (!port)
		return slots;

	slots = port->vcpi.num_slots;
	drm_dp_put_port(port);
	return slots;
}
EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots);

/**
 * drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI
 * @mgr: manager for this port
 * @port: unverified pointer to a port.
 *
 * This just resets the number of slots for the port's VCPI for later programming.
 */
void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
{
	port = drm_dp_get_validated_port_ref(mgr, port);
	if (!port)
		return;
	port->vcpi.num_slots = 0;
	drm_dp_put_port(port);
}
EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots);

/**
 * drm_dp_mst_deallocate_vcpi() - deallocate a VCPI
 * @mgr: manager for this port
 * @port: unverified port to deallocate vcpi for
 */
void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
{
	port = drm_dp_get_validated_port_ref(mgr, port);
	if (!port)
		return;

	drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
	port->vcpi.num_slots = 0;
	port->vcpi.pbn = 0;
	port->vcpi.aligned_pbn = 0;
	port->vcpi.vcpi = 0;
	drm_dp_put_port(port);
}
EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi);

static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
				     int id, struct drm_dp_payload *payload)
{
	u8 payload_alloc[3], status;
	int ret;
	int retries = 0;

	drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
			   DP_PAYLOAD_TABLE_UPDATED);

	payload_alloc[0] = id;
	payload_alloc[1] = payload->start_slot;
	payload_alloc[2] = payload->num_slots;

	ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
	if (ret != 3) {
		DRM_DEBUG_KMS("failed to write payload allocation %d\n", ret);
		goto fail;
	}

retry:
	ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
	if (ret < 0) {
		DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
		goto fail;
	}

	if (!(status & DP_PAYLOAD_TABLE_UPDATED)) {
		retries++;
		if (retries < 20) {
			usleep_range(10000, 20000);
			goto retry;
		}
		DRM_DEBUG_KMS("status not set after read payload table status %d\n", status);
		ret = -EINVAL;
		goto fail;
	}
	ret = 0;
fail:
	return ret;
}

/**
 * drm_dp_check_act_status() - Check ACT handled status.
 * @mgr: manager to use
 *
 * Check the payload status bits in the DPCD for ACT handled completion.
 */
int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
{
	u8 status;
	int ret;
	int count = 0;

	do {
		ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);

		if (ret < 0) {
			DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
			goto fail;
		}

		if (status & DP_PAYLOAD_ACT_HANDLED)
			break;
		count++;
		udelay(100);

	} while (count < 30);

	if (!(status & DP_PAYLOAD_ACT_HANDLED)) {
		DRM_DEBUG_KMS("failed to get ACT bit %d after %d retries\n", status, count);
		ret = -EINVAL;
		goto fail;
	}
	return 0;
fail:
	return ret;
}
EXPORT_SYMBOL(drm_dp_check_act_status);

/**
 * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
 * @clock: dot clock (in kHz) for the mode
 * @bpp: bpp for the mode.
 *
 * This uses the formula in the spec to calculate the PBN value for a mode.
 */
int drm_dp_calc_pbn_mode(int clock, int bpp)
{
	fixed20_12 pix_bw;
	fixed20_12 fbpp;
	fixed20_12 result;
	fixed20_12 margin, tmp;
	u32 res;

	pix_bw.full = dfixed_const(clock);
	fbpp.full = dfixed_const(bpp);
	tmp.full = dfixed_const(8);
	fbpp.full = dfixed_div(fbpp, tmp);

	result.full = dfixed_mul(pix_bw, fbpp);
	margin.full = dfixed_const(54);
	tmp.full = dfixed_const(64);
	margin.full = dfixed_div(margin, tmp);
	result.full = dfixed_div(result, margin);

	margin.full = dfixed_const(1006);
	tmp.full = dfixed_const(1000);
	margin.full = dfixed_div(margin, tmp);
	result.full = dfixed_mul(result, margin);

	result.full = dfixed_div(result, tmp);
	result.full = dfixed_ceil(result);
	res = dfixed_trunc(result);
	return res;
}
EXPORT_SYMBOL(drm_dp_calc_pbn_mode);

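/*
 * Worked example (not in the original source), matching the first case in
 * test_calc_pbn_mode() below: clock = 154000 kHz, bpp = 30.
 *
 *   154000 * 30 / 8  = 577500      (payload in kB/s)
 *   / (54/64)        = 684444.4    (MTP framing overhead)
 *   * 1.006          = 688551.1    (spec margin)
 *   / 1000           = 688.55 -> ceil -> 689 PBN
 */
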
static int test_calc_pbn_mode(void)
{
	int ret;
	ret = drm_dp_calc_pbn_mode(154000, 30);
	if (ret != 689)
		return -EINVAL;
	ret = drm_dp_calc_pbn_mode(234000, 30);
	if (ret != 1047)
		return -EINVAL;
	return 0;
}

/* we want to kick the TX after we've ACKed the up/down IRQs. */
static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr)
{
//   queue_work(system_long_wq, &mgr->tx_work);
}

static void drm_dp_mst_dump_mstb(struct seq_file *m,
				 struct drm_dp_mst_branch *mstb)
{
	struct drm_dp_mst_port *port;
	int tabs = mstb->lct;
	char prefix[10];
	int i;

	for (i = 0; i < tabs; i++)
		prefix[i] = '\t';
	prefix[i] = '\0';

//   seq_printf(m, "%smst: %p, %d\n", prefix, mstb, mstb->num_ports);
//   list_for_each_entry(port, &mstb->ports, next) {
//       seq_printf(m, "%sport: %d: ddps: %d ldps: %d, %p, conn: %p\n", prefix, port->port_num, port->ddps, port->ldps, port, port->connector);
//       if (port->mstb)
//           drm_dp_mst_dump_mstb(m, port->mstb);
//   }
}

static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
				  char *buf)
{
	int ret;
	int i;
	for (i = 0; i < 4; i++) {
		ret = drm_dp_dpcd_read(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS + (i * 16), &buf[i * 16], 16);
		if (ret != 16)
			break;
	}
	if (i == 4)
		return true;
	return false;
}

/**
 * drm_dp_mst_dump_topology(): dump topology to seq file.
 * @m: seq_file to dump output to
 * @mgr: manager to dump current topology for.
 *
 * helper to dump MST topology to a seq file for debugfs.
 */
void drm_dp_mst_dump_topology(struct seq_file *m,
			      struct drm_dp_mst_topology_mgr *mgr)
{
	int i;
	struct drm_dp_mst_port *port;
	mutex_lock(&mgr->lock);
	if (mgr->mst_primary)
		drm_dp_mst_dump_mstb(m, mgr->mst_primary);

	/* dump VCPIs */
	mutex_unlock(&mgr->lock);
}
EXPORT_SYMBOL(drm_dp_mst_dump_topology);

static void drm_dp_tx_work(struct work_struct *work)
{
	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);

	mutex_lock(&mgr->qlock);
	if (mgr->tx_down_in_progress)
		process_single_down_tx_qlock(mgr);
	mutex_unlock(&mgr->qlock);
}

/**
 * drm_dp_mst_topology_mgr_init - initialise a topology manager
 * @mgr: manager struct to initialise
 * @dev: device providing this structure - for i2c addition.
 * @aux: DP helper aux channel to talk to this device
 * @max_dpcd_transaction_bytes: hw specific DPCD transaction limit
 * @max_payloads: maximum number of payloads this GPU can source
 * @conn_base_id: the connector object ID the MST device is connected to.
 *
 * Return 0 for success, or negative error code on failure
 */
int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
				 struct device *dev, struct drm_dp_aux *aux,
				 int max_dpcd_transaction_bytes,
				 int max_payloads, int conn_base_id)
{
	mutex_init(&mgr->lock);
	mutex_init(&mgr->qlock);
	mutex_init(&mgr->payload_lock);
	mutex_init(&mgr->destroy_connector_lock);
	INIT_LIST_HEAD(&mgr->tx_msg_upq);
	INIT_LIST_HEAD(&mgr->tx_msg_downq);
	INIT_LIST_HEAD(&mgr->destroy_connector_list);
	INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
	INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
//   init_waitqueue_head(&mgr->tx_waitq);
	mgr->dev = dev;
	mgr->aux = aux;
	mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
	mgr->max_payloads = max_payloads;
	mgr->conn_base_id = conn_base_id;
	mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL);
	if (!mgr->payloads)
		return -ENOMEM;
	mgr->proposed_vcpis = kcalloc(max_payloads, sizeof(struct drm_dp_vcpi *), GFP_KERNEL);
	if (!mgr->proposed_vcpis)
		return -ENOMEM;
	set_bit(0, &mgr->payload_mask);
	test_calc_pbn_mode();
	return 0;
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);

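/*
 * A hedged initialisation sketch (not in the original source). The
 * numeric arguments are illustrative: a 16-byte DPCD transaction limit
 * and one payload per CRTC are plausible driver choices, not values
 * mandated by this API.
 */
#if 0 /* illustrative only */
static int example_mst_init(struct drm_dp_mst_topology_mgr *mgr,
			    struct device *dev, struct drm_dp_aux *aux,
			    int num_crtcs, int connector_id)
{
	/* one payload per CRTC that can feed this port */
	return drm_dp_mst_topology_mgr_init(mgr, dev, aux,
					    16 /* max DPCD transaction bytes */,
					    num_crtcs, connector_id);
}
#endif
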
/**
 * drm_dp_mst_topology_mgr_destroy() - destroy topology manager.
 * @mgr: manager to destroy
 */
void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
{
	mutex_lock(&mgr->payload_lock);
	kfree(mgr->payloads);
	mgr->payloads = NULL;
	kfree(mgr->proposed_vcpis);
	mgr->proposed_vcpis = NULL;
	mutex_unlock(&mgr->payload_lock);
	mgr->dev = NULL;
	mgr->aux = NULL;
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);

/* I2C device */
static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
			       int num)
{
	struct drm_dp_aux *aux = adapter->algo_data;
	struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port, aux);
	struct drm_dp_mst_branch *mstb;
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
	unsigned int i;
	bool reading = false;
	struct drm_dp_sideband_msg_req_body msg;
	struct drm_dp_sideband_msg_tx *txmsg = NULL;
	int ret;

	mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
	if (!mstb)
		return -EREMOTEIO;

	/* construct i2c msg */
	/* see if last msg is a read */
	if (msgs[num - 1].flags & I2C_M_RD)
		reading = true;

	if (!reading || (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)) {
		DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n");
		ret = -EIO;
		goto out;
	}

	memset(&msg, 0, sizeof(msg));
	msg.req_type = DP_REMOTE_I2C_READ;
	msg.u.i2c_read.num_transactions = num - 1;
	msg.u.i2c_read.port_number = port->port_num;
	for (i = 0; i < num - 1; i++) {
		msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr;
		msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len;
		msg.u.i2c_read.transactions[i].bytes = msgs[i].buf;
	}
	msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr;
	msg.u.i2c_read.num_bytes_read = msgs[num - 1].len;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg) {
		ret = -ENOMEM;
		goto out;
	}

	txmsg->dst = mstb;
	drm_dp_encode_sideband_req(&msg, txmsg);

	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {

		if (txmsg->reply.reply_type == 1) { /* got a NAK back */
			ret = -EREMOTEIO;
			goto out;
		}
		if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) {
			ret = -EIO;
			goto out;
		}
		memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes, msgs[num - 1].len);
		ret = num;
	}
out:
	kfree(txmsg);
	drm_dp_put_mst_branch_device(mstb);
	return ret;
}

static u32 drm_dp_mst_i2c_functionality(struct i2c_adapter *adapter)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
	       I2C_FUNC_SMBUS_READ_BLOCK_DATA |
	       I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
	       I2C_FUNC_10BIT_ADDR;
}

static const struct i2c_algorithm drm_dp_mst_i2c_algo = {
	.functionality = drm_dp_mst_i2c_functionality,
	.master_xfer = drm_dp_mst_i2c_xfer,
};

/**
 * drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX
 * @aux: DisplayPort AUX channel
 *
 * Returns 0 on success or a negative error code on failure.
 */
static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux)
{
	aux->ddc.algo = &drm_dp_mst_i2c_algo;
	aux->ddc.algo_data = aux;
	aux->ddc.retries = 3;

	aux->ddc.class = I2C_CLASS_DDC;
	aux->ddc.owner = THIS_MODULE;
	aux->ddc.dev.parent = aux->dev;

	return i2c_add_adapter(&aux->ddc);
}

/**
 * drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter
 * @aux: DisplayPort AUX channel
 */
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux)
{
	i2c_del_adapter(&aux->ddc);
}