Subversion Repositories Kolibri OS

Rev

Rev 6084 | Rev 6320 | Go to most recent revision | Details | Compare with Previous | Last modification | View Log | RSS feed

Rev Author Line No. Line
5060 serge 1
/*
2
 * Copyright © 2014 Red Hat
3
 *
4
 * Permission to use, copy, modify, distribute, and sell this software and its
5
 * documentation for any purpose is hereby granted without fee, provided that
6
 * the above copyright notice appear in all copies and that both that copyright
7
 * notice and this permission notice appear in supporting documentation, and
8
 * that the name of the copyright holders not be used in advertising or
9
 * publicity pertaining to distribution of the software without specific,
10
 * written prior permission.  The copyright holders make no representations
11
 * about the suitability of this software for any purpose.  It is provided "as
12
 * is" without express or implied warranty.
13
 *
14
 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
15
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
16
 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
17
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
18
 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
19
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
20
 * OF THIS SOFTWARE.
21
 */
22
 
23
#include 
24
#include 
25
#include 
26
#include 
27
#include 
28
#include 
6088 serge 29
#include 
5060 serge 30
#include 
31
#include 
32
#include 
33
#include 
34
#include 
35
#include 
36
 
37
#include 
38
 
39
u64 get_jiffies_64(void)
40
{
41
    return jiffies;
42
}
43
/**
44
 * DOC: dp mst helper
45
 *
46
 * These functions contain parts of the DisplayPort 1.2a MultiStream Transport
47
 * protocol. The helpers contain a topology manager and bandwidth manager.
48
 * The helpers encapsulate the sending and received of sideband msgs.
49
 */
50
static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
51
				  char *buf);
52
static int test_calc_pbn_mode(void);
53
 
54
static void drm_dp_put_port(struct drm_dp_mst_port *port);
55
 
56
static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
57
				     int id,
58
				     struct drm_dp_payload *payload);
59
 
60
static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
61
				  struct drm_dp_mst_port *port,
62
				  int offset, int size, u8 *bytes);
63
 
6084 serge 64
static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
65
				     struct drm_dp_mst_branch *mstb);
5060 serge 66
static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
67
					   struct drm_dp_mst_branch *mstb,
68
					   struct drm_dp_mst_port *port);
69
static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
70
				 u8 *guid);
71
 
72
static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux);
73
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux);
74
static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);
75
/* sideband msg handling */
76
static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
77
{
78
	u8 bitmask = 0x80;
79
	u8 bitshift = 7;
80
	u8 array_index = 0;
81
	int number_of_bits = num_nibbles * 4;
82
	u8 remainder = 0;
83
 
84
	while (number_of_bits != 0) {
85
		number_of_bits--;
86
		remainder <<= 1;
87
		remainder |= (data[array_index] & bitmask) >> bitshift;
88
		bitmask >>= 1;
89
		bitshift--;
90
		if (bitmask == 0) {
91
			bitmask = 0x80;
92
			bitshift = 7;
93
			array_index++;
94
		}
95
		if ((remainder & 0x10) == 0x10)
96
			remainder ^= 0x13;
97
	}
98
 
99
	number_of_bits = 4;
100
	while (number_of_bits != 0) {
101
		number_of_bits--;
102
		remainder <<= 1;
103
		if ((remainder & 0x10) != 0)
104
			remainder ^= 0x13;
105
	}
106
 
107
	return remainder;
108
}
109
 
110
static u8 drm_dp_msg_data_crc4(const uint8_t *data, u8 number_of_bytes)
111
{
112
	u8 bitmask = 0x80;
113
	u8 bitshift = 7;
114
	u8 array_index = 0;
115
	int number_of_bits = number_of_bytes * 8;
116
	u16 remainder = 0;
117
 
118
	while (number_of_bits != 0) {
119
		number_of_bits--;
120
		remainder <<= 1;
121
		remainder |= (data[array_index] & bitmask) >> bitshift;
122
		bitmask >>= 1;
123
		bitshift--;
124
		if (bitmask == 0) {
125
			bitmask = 0x80;
126
			bitshift = 7;
127
			array_index++;
128
		}
129
		if ((remainder & 0x100) == 0x100)
130
			remainder ^= 0xd5;
131
	}
132
 
133
	number_of_bits = 8;
134
	while (number_of_bits != 0) {
135
		number_of_bits--;
136
		remainder <<= 1;
137
		if ((remainder & 0x100) != 0)
138
			remainder ^= 0xd5;
139
	}
140
 
141
	return remainder & 0xff;
142
}
143
static inline u8 drm_dp_calc_sb_hdr_size(struct drm_dp_sideband_msg_hdr *hdr)
144
{
145
	u8 size = 3;
146
	size += (hdr->lct / 2);
147
	return size;
148
}
149
 
150
static void drm_dp_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
151
					   u8 *buf, int *len)
152
{
153
	int idx = 0;
154
	int i;
155
	u8 crc4;
156
	buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf);
157
	for (i = 0; i < (hdr->lct / 2); i++)
158
		buf[idx++] = hdr->rad[i];
159
	buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) |
160
		(hdr->msg_len & 0x3f);
161
	buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4);
162
 
163
	crc4 = drm_dp_msg_header_crc4(buf, (idx * 2) - 1);
164
	buf[idx - 1] |= (crc4 & 0xf);
165
 
166
	*len = idx;
167
}
168
 
169
static bool drm_dp_decode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
170
					   u8 *buf, int buflen, u8 *hdrlen)
171
{
172
	u8 crc4;
173
	u8 len;
174
	int i;
175
	u8 idx;
176
	if (buf[0] == 0)
177
		return false;
178
	len = 3;
179
	len += ((buf[0] & 0xf0) >> 4) / 2;
180
	if (len > buflen)
181
		return false;
182
	crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1);
183
 
184
	if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) {
185
		DRM_DEBUG_KMS("crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]);
186
		return false;
187
	}
188
 
189
	hdr->lct = (buf[0] & 0xf0) >> 4;
190
	hdr->lcr = (buf[0] & 0xf);
191
	idx = 1;
192
	for (i = 0; i < (hdr->lct / 2); i++)
193
		hdr->rad[i] = buf[idx++];
194
	hdr->broadcast = (buf[idx] >> 7) & 0x1;
195
	hdr->path_msg = (buf[idx] >> 6) & 0x1;
196
	hdr->msg_len = buf[idx] & 0x3f;
197
	idx++;
198
	hdr->somt = (buf[idx] >> 7) & 0x1;
199
	hdr->eomt = (buf[idx] >> 6) & 0x1;
200
	hdr->seqno = (buf[idx] >> 4) & 0x1;
201
	idx++;
202
	*hdrlen = idx;
203
	return true;
204
}
205
 
206
static void drm_dp_encode_sideband_req(struct drm_dp_sideband_msg_req_body *req,
207
				       struct drm_dp_sideband_msg_tx *raw)
208
{
209
	int idx = 0;
210
	int i;
211
	u8 *buf = raw->msg;
212
	buf[idx++] = req->req_type & 0x7f;
213
 
214
	switch (req->req_type) {
215
	case DP_ENUM_PATH_RESOURCES:
216
		buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
217
		idx++;
218
		break;
219
	case DP_ALLOCATE_PAYLOAD:
220
		buf[idx] = (req->u.allocate_payload.port_number & 0xf) << 4 |
221
			(req->u.allocate_payload.number_sdp_streams & 0xf);
222
		idx++;
223
		buf[idx] = (req->u.allocate_payload.vcpi & 0x7f);
224
		idx++;
225
		buf[idx] = (req->u.allocate_payload.pbn >> 8);
226
		idx++;
227
		buf[idx] = (req->u.allocate_payload.pbn & 0xff);
228
		idx++;
229
		for (i = 0; i < req->u.allocate_payload.number_sdp_streams / 2; i++) {
230
			buf[idx] = ((req->u.allocate_payload.sdp_stream_sink[i * 2] & 0xf) << 4) |
231
				(req->u.allocate_payload.sdp_stream_sink[i * 2 + 1] & 0xf);
232
			idx++;
233
		}
234
		if (req->u.allocate_payload.number_sdp_streams & 1) {
235
			i = req->u.allocate_payload.number_sdp_streams - 1;
236
			buf[idx] = (req->u.allocate_payload.sdp_stream_sink[i] & 0xf) << 4;
237
			idx++;
238
		}
239
		break;
240
	case DP_QUERY_PAYLOAD:
241
		buf[idx] = (req->u.query_payload.port_number & 0xf) << 4;
242
		idx++;
243
		buf[idx] = (req->u.query_payload.vcpi & 0x7f);
244
		idx++;
245
		break;
246
	case DP_REMOTE_DPCD_READ:
247
		buf[idx] = (req->u.dpcd_read.port_number & 0xf) << 4;
248
		buf[idx] |= ((req->u.dpcd_read.dpcd_address & 0xf0000) >> 16) & 0xf;
249
		idx++;
250
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff00) >> 8;
251
		idx++;
252
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff);
253
		idx++;
254
		buf[idx] = (req->u.dpcd_read.num_bytes);
255
		idx++;
256
		break;
257
 
258
	case DP_REMOTE_DPCD_WRITE:
259
		buf[idx] = (req->u.dpcd_write.port_number & 0xf) << 4;
260
		buf[idx] |= ((req->u.dpcd_write.dpcd_address & 0xf0000) >> 16) & 0xf;
261
		idx++;
262
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff00) >> 8;
263
		idx++;
264
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff);
265
		idx++;
266
		buf[idx] = (req->u.dpcd_write.num_bytes);
267
		idx++;
268
		memcpy(&buf[idx], req->u.dpcd_write.bytes, req->u.dpcd_write.num_bytes);
269
		idx += req->u.dpcd_write.num_bytes;
270
		break;
271
	case DP_REMOTE_I2C_READ:
272
		buf[idx] = (req->u.i2c_read.port_number & 0xf) << 4;
273
		buf[idx] |= (req->u.i2c_read.num_transactions & 0x3);
274
		idx++;
275
		for (i = 0; i < (req->u.i2c_read.num_transactions & 0x3); i++) {
276
			buf[idx] = req->u.i2c_read.transactions[i].i2c_dev_id & 0x7f;
277
			idx++;
278
			buf[idx] = req->u.i2c_read.transactions[i].num_bytes;
279
			idx++;
280
			memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);
281
			idx += req->u.i2c_read.transactions[i].num_bytes;
282
 
283
			buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 5;
284
			buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);
285
			idx++;
286
		}
287
		buf[idx] = (req->u.i2c_read.read_i2c_device_id) & 0x7f;
288
		idx++;
289
		buf[idx] = (req->u.i2c_read.num_bytes_read);
290
		idx++;
291
		break;
292
 
293
	case DP_REMOTE_I2C_WRITE:
294
		buf[idx] = (req->u.i2c_write.port_number & 0xf) << 4;
295
		idx++;
296
		buf[idx] = (req->u.i2c_write.write_i2c_device_id) & 0x7f;
297
		idx++;
298
		buf[idx] = (req->u.i2c_write.num_bytes);
299
		idx++;
300
		memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
301
		idx += req->u.i2c_write.num_bytes;
302
		break;
303
	}
304
	raw->cur_len = idx;
305
}
306
 
307
static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len)
308
{
309
	u8 crc4;
310
	crc4 = drm_dp_msg_data_crc4(msg, len);
311
	msg[len] = crc4;
312
}
313
 
314
static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *rep,
315
					 struct drm_dp_sideband_msg_tx *raw)
316
{
317
	int idx = 0;
318
	u8 *buf = raw->msg;
319
 
320
	buf[idx++] = (rep->reply_type & 0x1) << 7 | (rep->req_type & 0x7f);
321
 
322
	raw->cur_len = idx;
323
}
324
 
325
/* this adds a chunk of msg to the builder to get the final msg */
326
static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
327
				      u8 *replybuf, u8 replybuflen, bool hdr)
328
{
329
	int ret;
330
	u8 crc4;
331
 
332
	if (hdr) {
333
		u8 hdrlen;
334
		struct drm_dp_sideband_msg_hdr recv_hdr;
335
		ret = drm_dp_decode_sideband_msg_hdr(&recv_hdr, replybuf, replybuflen, &hdrlen);
336
		if (ret == false) {
337
			print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16, 1, replybuf, replybuflen, false);
338
			return false;
339
		}
340
 
341
		/* get length contained in this portion */
342
		msg->curchunk_len = recv_hdr.msg_len;
343
		msg->curchunk_hdrlen = hdrlen;
344
 
345
		/* we have already gotten an somt - don't bother parsing */
346
		if (recv_hdr.somt && msg->have_somt)
347
			return false;
348
 
349
		if (recv_hdr.somt) {
350
			memcpy(&msg->initial_hdr, &recv_hdr, sizeof(struct drm_dp_sideband_msg_hdr));
351
			msg->have_somt = true;
352
		}
353
		if (recv_hdr.eomt)
354
			msg->have_eomt = true;
355
 
356
		/* copy the bytes for the remainder of this header chunk */
357
		msg->curchunk_idx = min(msg->curchunk_len, (u8)(replybuflen - hdrlen));
358
		memcpy(&msg->chunk[0], replybuf + hdrlen, msg->curchunk_idx);
359
	} else {
360
		memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
361
		msg->curchunk_idx += replybuflen;
362
	}
363
 
364
	if (msg->curchunk_idx >= msg->curchunk_len) {
365
		/* do CRC */
366
		crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);
367
		/* copy chunk into bigger msg */
368
		memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1);
369
		msg->curlen += msg->curchunk_len - 1;
370
	}
371
	return true;
372
}
373
 
374
static bool drm_dp_sideband_parse_link_address(struct drm_dp_sideband_msg_rx *raw,
375
					       struct drm_dp_sideband_msg_reply_body *repmsg)
376
{
377
	int idx = 1;
378
	int i;
379
	memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16);
380
	idx += 16;
381
	repmsg->u.link_addr.nports = raw->msg[idx] & 0xf;
382
	idx++;
383
	if (idx > raw->curlen)
384
		goto fail_len;
385
	for (i = 0; i < repmsg->u.link_addr.nports; i++) {
386
		if (raw->msg[idx] & 0x80)
387
			repmsg->u.link_addr.ports[i].input_port = 1;
388
 
389
		repmsg->u.link_addr.ports[i].peer_device_type = (raw->msg[idx] >> 4) & 0x7;
390
		repmsg->u.link_addr.ports[i].port_number = (raw->msg[idx] & 0xf);
391
 
392
		idx++;
393
		if (idx > raw->curlen)
394
			goto fail_len;
395
		repmsg->u.link_addr.ports[i].mcs = (raw->msg[idx] >> 7) & 0x1;
396
		repmsg->u.link_addr.ports[i].ddps = (raw->msg[idx] >> 6) & 0x1;
397
		if (repmsg->u.link_addr.ports[i].input_port == 0)
398
			repmsg->u.link_addr.ports[i].legacy_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
399
		idx++;
400
		if (idx > raw->curlen)
401
			goto fail_len;
402
		if (repmsg->u.link_addr.ports[i].input_port == 0) {
403
			repmsg->u.link_addr.ports[i].dpcd_revision = (raw->msg[idx]);
404
			idx++;
405
			if (idx > raw->curlen)
406
				goto fail_len;
407
			memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16);
408
			idx += 16;
409
			if (idx > raw->curlen)
410
				goto fail_len;
411
			repmsg->u.link_addr.ports[i].num_sdp_streams = (raw->msg[idx] >> 4) & 0xf;
412
			repmsg->u.link_addr.ports[i].num_sdp_stream_sinks = (raw->msg[idx] & 0xf);
413
			idx++;
414
 
415
		}
416
		if (idx > raw->curlen)
417
			goto fail_len;
418
	}
419
 
420
	return true;
421
fail_len:
422
	DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
423
	return false;
424
}
425
 
426
static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx *raw,
427
						   struct drm_dp_sideband_msg_reply_body *repmsg)
428
{
429
	int idx = 1;
430
	repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf;
431
	idx++;
432
	if (idx > raw->curlen)
433
		goto fail_len;
434
	repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx];
435
	if (idx > raw->curlen)
436
		goto fail_len;
437
 
438
	memcpy(repmsg->u.remote_dpcd_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_dpcd_read_ack.num_bytes);
439
	return true;
440
fail_len:
441
	DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
442
	return false;
443
}
444
 
445
static bool drm_dp_sideband_parse_remote_dpcd_write(struct drm_dp_sideband_msg_rx *raw,
446
						      struct drm_dp_sideband_msg_reply_body *repmsg)
447
{
448
	int idx = 1;
449
	repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf;
450
	idx++;
451
	if (idx > raw->curlen)
452
		goto fail_len;
453
	return true;
454
fail_len:
455
	DRM_DEBUG_KMS("parse length fail %d %d\n", idx, raw->curlen);
456
	return false;
457
}
458
 
459
static bool drm_dp_sideband_parse_remote_i2c_read_ack(struct drm_dp_sideband_msg_rx *raw,
460
						      struct drm_dp_sideband_msg_reply_body *repmsg)
461
{
462
	int idx = 1;
463
 
464
	repmsg->u.remote_i2c_read_ack.port_number = (raw->msg[idx] & 0xf);
465
	idx++;
466
	if (idx > raw->curlen)
467
		goto fail_len;
468
	repmsg->u.remote_i2c_read_ack.num_bytes = raw->msg[idx];
469
	idx++;
470
	/* TODO check */
471
	memcpy(repmsg->u.remote_i2c_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_i2c_read_ack.num_bytes);
472
	return true;
473
fail_len:
474
	DRM_DEBUG_KMS("remote i2c reply parse length fail %d %d\n", idx, raw->curlen);
475
	return false;
476
}
477
 
478
static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband_msg_rx *raw,
479
							  struct drm_dp_sideband_msg_reply_body *repmsg)
480
{
481
	int idx = 1;
482
	repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf;
483
	idx++;
484
	if (idx > raw->curlen)
485
		goto fail_len;
486
	repmsg->u.path_resources.full_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
487
	idx += 2;
488
	if (idx > raw->curlen)
489
		goto fail_len;
490
	repmsg->u.path_resources.avail_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
491
	idx += 2;
492
	if (idx > raw->curlen)
493
		goto fail_len;
494
	return true;
495
fail_len:
496
	DRM_DEBUG_KMS("enum resource parse length fail %d %d\n", idx, raw->curlen);
497
	return false;
498
}
499
 
500
static bool drm_dp_sideband_parse_allocate_payload_ack(struct drm_dp_sideband_msg_rx *raw,
501
							  struct drm_dp_sideband_msg_reply_body *repmsg)
502
{
503
	int idx = 1;
504
	repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
505
	idx++;
506
	if (idx > raw->curlen)
507
		goto fail_len;
508
	repmsg->u.allocate_payload.vcpi = raw->msg[idx];
509
	idx++;
510
	if (idx > raw->curlen)
511
		goto fail_len;
512
	repmsg->u.allocate_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
513
	idx += 2;
514
	if (idx > raw->curlen)
515
		goto fail_len;
516
	return true;
517
fail_len:
518
	DRM_DEBUG_KMS("allocate payload parse length fail %d %d\n", idx, raw->curlen);
519
	return false;
520
}
521
 
522
static bool drm_dp_sideband_parse_query_payload_ack(struct drm_dp_sideband_msg_rx *raw,
523
						    struct drm_dp_sideband_msg_reply_body *repmsg)
524
{
525
	int idx = 1;
526
	repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
527
	idx++;
528
	if (idx > raw->curlen)
529
		goto fail_len;
530
	repmsg->u.query_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
531
	idx += 2;
532
	if (idx > raw->curlen)
533
		goto fail_len;
534
	return true;
535
fail_len:
536
	DRM_DEBUG_KMS("query payload parse length fail %d %d\n", idx, raw->curlen);
537
	return false;
538
}
539
 
540
static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
541
					struct drm_dp_sideband_msg_reply_body *msg)
542
{
543
	memset(msg, 0, sizeof(*msg));
544
	msg->reply_type = (raw->msg[0] & 0x80) >> 7;
545
	msg->req_type = (raw->msg[0] & 0x7f);
546
 
547
	if (msg->reply_type) {
548
		memcpy(msg->u.nak.guid, &raw->msg[1], 16);
549
		msg->u.nak.reason = raw->msg[17];
550
		msg->u.nak.nak_data = raw->msg[18];
551
		return false;
552
	}
553
 
554
	switch (msg->req_type) {
555
	case DP_LINK_ADDRESS:
556
		return drm_dp_sideband_parse_link_address(raw, msg);
557
	case DP_QUERY_PAYLOAD:
558
		return drm_dp_sideband_parse_query_payload_ack(raw, msg);
559
	case DP_REMOTE_DPCD_READ:
560
		return drm_dp_sideband_parse_remote_dpcd_read(raw, msg);
561
	case DP_REMOTE_DPCD_WRITE:
562
		return drm_dp_sideband_parse_remote_dpcd_write(raw, msg);
563
	case DP_REMOTE_I2C_READ:
564
		return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg);
565
	case DP_ENUM_PATH_RESOURCES:
566
		return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);
567
	case DP_ALLOCATE_PAYLOAD:
568
		return drm_dp_sideband_parse_allocate_payload_ack(raw, msg);
569
	default:
570
		DRM_ERROR("Got unknown reply 0x%02x\n", msg->req_type);
571
		return false;
572
	}
573
}
574
 
575
static bool drm_dp_sideband_parse_connection_status_notify(struct drm_dp_sideband_msg_rx *raw,
576
							   struct drm_dp_sideband_msg_req_body *msg)
577
{
578
	int idx = 1;
579
 
580
	msg->u.conn_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
581
	idx++;
582
	if (idx > raw->curlen)
583
		goto fail_len;
584
 
585
	memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16);
586
	idx += 16;
587
	if (idx > raw->curlen)
588
		goto fail_len;
589
 
590
	msg->u.conn_stat.legacy_device_plug_status = (raw->msg[idx] >> 6) & 0x1;
591
	msg->u.conn_stat.displayport_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
592
	msg->u.conn_stat.message_capability_status = (raw->msg[idx] >> 4) & 0x1;
593
	msg->u.conn_stat.input_port = (raw->msg[idx] >> 3) & 0x1;
594
	msg->u.conn_stat.peer_device_type = (raw->msg[idx] & 0x7);
595
	idx++;
596
	return true;
597
fail_len:
598
	DRM_DEBUG_KMS("connection status reply parse length fail %d %d\n", idx, raw->curlen);
599
	return false;
600
}
601
 
602
static bool drm_dp_sideband_parse_resource_status_notify(struct drm_dp_sideband_msg_rx *raw,
603
							   struct drm_dp_sideband_msg_req_body *msg)
604
{
605
	int idx = 1;
606
 
607
	msg->u.resource_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
608
	idx++;
609
	if (idx > raw->curlen)
610
		goto fail_len;
611
 
612
	memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16);
613
	idx += 16;
614
	if (idx > raw->curlen)
615
		goto fail_len;
616
 
617
	msg->u.resource_stat.available_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
618
	idx++;
619
	return true;
620
fail_len:
621
	DRM_DEBUG_KMS("resource status reply parse length fail %d %d\n", idx, raw->curlen);
622
	return false;
623
}
624
 
625
static bool drm_dp_sideband_parse_req(struct drm_dp_sideband_msg_rx *raw,
626
				      struct drm_dp_sideband_msg_req_body *msg)
627
{
628
	memset(msg, 0, sizeof(*msg));
629
	msg->req_type = (raw->msg[0] & 0x7f);
630
 
631
	switch (msg->req_type) {
632
	case DP_CONNECTION_STATUS_NOTIFY:
633
		return drm_dp_sideband_parse_connection_status_notify(raw, msg);
634
	case DP_RESOURCE_STATUS_NOTIFY:
635
		return drm_dp_sideband_parse_resource_status_notify(raw, msg);
636
	default:
637
		DRM_ERROR("Got unknown request 0x%02x\n", msg->req_type);
638
		return false;
639
	}
640
}
641
 
642
static int build_dpcd_write(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
643
{
644
	struct drm_dp_sideband_msg_req_body req;
645
 
646
	req.req_type = DP_REMOTE_DPCD_WRITE;
647
	req.u.dpcd_write.port_number = port_num;
648
	req.u.dpcd_write.dpcd_address = offset;
649
	req.u.dpcd_write.num_bytes = num_bytes;
650
	req.u.dpcd_write.bytes = bytes;
651
	drm_dp_encode_sideband_req(&req, msg);
652
 
653
	return 0;
654
}
655
 
656
static int build_link_address(struct drm_dp_sideband_msg_tx *msg)
657
{
658
	struct drm_dp_sideband_msg_req_body req;
659
 
660
	req.req_type = DP_LINK_ADDRESS;
661
	drm_dp_encode_sideband_req(&req, msg);
662
	return 0;
663
}
664
 
665
static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int port_num)
666
{
667
	struct drm_dp_sideband_msg_req_body req;
668
 
669
	req.req_type = DP_ENUM_PATH_RESOURCES;
670
	req.u.port_num.port_number = port_num;
671
	drm_dp_encode_sideband_req(&req, msg);
672
	msg->path_msg = true;
673
	return 0;
674
}
675
 
676
static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_num,
677
				  u8 vcpi, uint16_t pbn)
678
{
679
	struct drm_dp_sideband_msg_req_body req;
680
	memset(&req, 0, sizeof(req));
681
	req.req_type = DP_ALLOCATE_PAYLOAD;
682
	req.u.allocate_payload.port_number = port_num;
683
	req.u.allocate_payload.vcpi = vcpi;
684
	req.u.allocate_payload.pbn = pbn;
685
	drm_dp_encode_sideband_req(&req, msg);
686
	msg->path_msg = true;
687
	return 0;
688
}
689
 
690
static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
691
					struct drm_dp_vcpi *vcpi)
692
{
5271 serge 693
	int ret, vcpi_ret;
5060 serge 694
 
695
	mutex_lock(&mgr->payload_lock);
696
	ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1);
697
	if (ret > mgr->max_payloads) {
698
		ret = -EINVAL;
699
		DRM_DEBUG_KMS("out of payload ids %d\n", ret);
700
		goto out_unlock;
701
	}
702
 
5271 serge 703
	vcpi_ret = find_first_zero_bit(&mgr->vcpi_mask, mgr->max_payloads + 1);
704
	if (vcpi_ret > mgr->max_payloads) {
705
		ret = -EINVAL;
706
		DRM_DEBUG_KMS("out of vcpi ids %d\n", ret);
707
		goto out_unlock;
708
	}
709
 
5060 serge 710
	set_bit(ret, &mgr->payload_mask);
5271 serge 711
	set_bit(vcpi_ret, &mgr->vcpi_mask);
712
	vcpi->vcpi = vcpi_ret + 1;
5060 serge 713
	mgr->proposed_vcpis[ret - 1] = vcpi;
714
out_unlock:
715
	mutex_unlock(&mgr->payload_lock);
716
	return ret;
717
}
718
 
719
static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr,
5271 serge 720
				      int vcpi)
5060 serge 721
{
5271 serge 722
	int i;
723
	if (vcpi == 0)
5060 serge 724
		return;
725
 
726
	mutex_lock(&mgr->payload_lock);
5271 serge 727
	DRM_DEBUG_KMS("putting payload %d\n", vcpi);
728
	clear_bit(vcpi - 1, &mgr->vcpi_mask);
729
 
730
	for (i = 0; i < mgr->max_payloads; i++) {
731
		if (mgr->proposed_vcpis[i])
732
			if (mgr->proposed_vcpis[i]->vcpi == vcpi) {
733
				mgr->proposed_vcpis[i] = NULL;
734
				clear_bit(i + 1, &mgr->payload_mask);
735
			}
736
	}
5060 serge 737
	mutex_unlock(&mgr->payload_lock);
738
}
739
 
740
static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
741
			      struct drm_dp_sideband_msg_tx *txmsg)
742
{
743
	bool ret;
6084 serge 744
 
745
	/*
746
	 * All updates to txmsg->state are protected by mgr->qlock, and the two
747
	 * cases we check here are terminal states. For those the barriers
748
	 * provided by the wake_up/wait_event pair are enough.
749
	 */
5060 serge 750
	ret = (txmsg->state == DRM_DP_SIDEBAND_TX_RX ||
751
	       txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT);
752
	return ret;
753
}
754
 
755
static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
756
				    struct drm_dp_sideband_msg_tx *txmsg)
757
{
758
	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
759
	int ret;
760
 
761
	ret = wait_event_timeout(mgr->tx_waitq,
762
				 check_txmsg_state(mgr, txmsg),
763
				 (4 * HZ));
764
	mutex_lock(&mstb->mgr->qlock);
765
	if (ret > 0) {
766
		if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) {
767
			ret = -EIO;
768
			goto out;
769
		}
770
	} else {
771
		DRM_DEBUG_KMS("timedout msg send %p %d %d\n", txmsg, txmsg->state, txmsg->seqno);
772
 
773
		/* dump some state */
774
		ret = -EIO;
775
 
776
		/* remove from q */
777
		if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED ||
778
		    txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND) {
779
			list_del(&txmsg->next);
780
		}
781
 
782
		if (txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
783
		    txmsg->state == DRM_DP_SIDEBAND_TX_SENT) {
784
			mstb->tx_slots[txmsg->seqno] = NULL;
785
		}
786
	}
787
out:
788
	mutex_unlock(&mgr->qlock);
789
 
790
	return ret;
791
}
792
 
793
static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
794
{
795
	struct drm_dp_mst_branch *mstb;
796
 
797
	mstb = kzalloc(sizeof(*mstb), GFP_KERNEL);
798
	if (!mstb)
799
		return NULL;
800
 
801
	mstb->lct = lct;
802
	if (lct > 1)
803
		memcpy(mstb->rad, rad, lct / 2);
804
	INIT_LIST_HEAD(&mstb->ports);
805
	kref_init(&mstb->kref);
806
	return mstb;
807
}
808
 
809
static void drm_dp_destroy_mst_branch_device(struct kref *kref)
810
{
811
	struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
812
	struct drm_dp_mst_port *port, *tmp;
813
	bool wake_tx = false;
814
 
815
	/*
816
	 * destroy all ports - don't need lock
817
	 * as there are no more references to the mst branch
818
	 * device at this point.
819
	 */
820
	list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
821
		list_del(&port->next);
822
		drm_dp_put_port(port);
823
	}
824
 
825
	/* drop any tx slots msg */
826
	mutex_lock(&mstb->mgr->qlock);
827
	if (mstb->tx_slots[0]) {
828
		mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
829
		mstb->tx_slots[0] = NULL;
830
		wake_tx = true;
831
	}
832
	if (mstb->tx_slots[1]) {
833
		mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
834
		mstb->tx_slots[1] = NULL;
835
		wake_tx = true;
836
	}
837
	mutex_unlock(&mstb->mgr->qlock);
838
 
839
//   if (wake_tx)
840
//       wake_up(&mstb->mgr->tx_waitq);
841
	kfree(mstb);
842
}
843
 
844
static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb)
845
{
846
	kref_put(&mstb->kref, drm_dp_destroy_mst_branch_device);
847
}
848
 
849
 
850
static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt)
851
{
5271 serge 852
	struct drm_dp_mst_branch *mstb;
853
 
5060 serge 854
	switch (old_pdt) {
855
	case DP_PEER_DEVICE_DP_LEGACY_CONV:
856
	case DP_PEER_DEVICE_SST_SINK:
857
		/* remove i2c over sideband */
858
		drm_dp_mst_unregister_i2c_bus(&port->aux);
859
		break;
860
	case DP_PEER_DEVICE_MST_BRANCHING:
5271 serge 861
		mstb = port->mstb;
5060 serge 862
		port->mstb = NULL;
5271 serge 863
		drm_dp_put_mst_branch_device(mstb);
5060 serge 864
		break;
865
	}
866
}
867
 
868
static void drm_dp_destroy_port(struct kref *kref)
869
{
870
	struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
871
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
6084 serge 872
 
5060 serge 873
	if (!port->input) {
874
		port->vcpi.num_slots = 0;
5271 serge 875
 
876
		kfree(port->cached_edid);
6084 serge 877
 
878
		/*
879
		 * The only time we don't have a connector
880
		 * on an output port is if the connector init
881
		 * fails.
882
		 */
883
		if (port->connector) {
884
			/* we can't destroy the connector here, as
885
			 * we might be holding the mode_config.mutex
886
			 * from an EDID retrieval */
887
 
888
			mutex_lock(&mgr->destroy_connector_lock);
889
			list_add(&port->next, &mgr->destroy_connector_list);
890
			mutex_unlock(&mgr->destroy_connector_lock);
891
//		schedule_work(&mgr->destroy_connector_work);
892
			return;
893
		}
894
		/* no need to clean up vcpi
895
		 * as if we have no connector we never setup a vcpi */
5060 serge 896
		drm_dp_port_teardown_pdt(port, port->pdt);
897
	}
898
	kfree(port);
899
}
900
 
901
static void drm_dp_put_port(struct drm_dp_mst_port *port)
902
{
903
	kref_put(&port->kref, drm_dp_destroy_port);
904
}
905
 
906
static struct drm_dp_mst_branch *drm_dp_mst_get_validated_mstb_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_branch *to_find)
907
{
908
	struct drm_dp_mst_port *port;
909
	struct drm_dp_mst_branch *rmstb;
910
	if (to_find == mstb) {
911
		kref_get(&mstb->kref);
912
		return mstb;
913
	}
914
	list_for_each_entry(port, &mstb->ports, next) {
915
		if (port->mstb) {
916
			rmstb = drm_dp_mst_get_validated_mstb_ref_locked(port->mstb, to_find);
917
			if (rmstb)
918
				return rmstb;
919
		}
920
	}
921
	return NULL;
922
}
923
 
924
static struct drm_dp_mst_branch *drm_dp_get_validated_mstb_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_branch *mstb)
925
{
926
	struct drm_dp_mst_branch *rmstb = NULL;
927
	mutex_lock(&mgr->lock);
928
	if (mgr->mst_primary)
929
		rmstb = drm_dp_mst_get_validated_mstb_ref_locked(mgr->mst_primary, mstb);
930
	mutex_unlock(&mgr->lock);
931
	return rmstb;
932
}
933
 
934
static struct drm_dp_mst_port *drm_dp_mst_get_port_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_port *to_find)
935
{
936
	struct drm_dp_mst_port *port, *mport;
937
 
938
	list_for_each_entry(port, &mstb->ports, next) {
939
		if (port == to_find) {
940
			kref_get(&port->kref);
941
			return port;
942
		}
943
		if (port->mstb) {
944
			mport = drm_dp_mst_get_port_ref_locked(port->mstb, to_find);
945
			if (mport)
946
				return mport;
947
		}
948
	}
949
	return NULL;
950
}
951
 
952
static struct drm_dp_mst_port *drm_dp_get_validated_port_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
953
{
954
	struct drm_dp_mst_port *rport = NULL;
955
	mutex_lock(&mgr->lock);
956
	if (mgr->mst_primary)
957
		rport = drm_dp_mst_get_port_ref_locked(mgr->mst_primary, port);
958
	mutex_unlock(&mgr->lock);
959
	return rport;
960
}
961
 
962
static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num)
963
{
964
	struct drm_dp_mst_port *port;
965
 
966
	list_for_each_entry(port, &mstb->ports, next) {
967
		if (port->port_num == port_num) {
968
			kref_get(&port->kref);
969
			return port;
970
		}
971
	}
972
 
973
	return NULL;
974
}
975
 
976
/*
977
 * calculate a new RAD for this MST branch device
978
 * if parent has an LCT of 2 then it has 1 nibble of RAD,
979
 * if parent has an LCT of 3 then it has 2 nibbles of RAD,
980
 */
981
static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
982
				 u8 *rad)
983
{
984
	int lct = port->parent->lct;
985
	int shift = 4;
986
	int idx = lct / 2;
987
	if (lct > 1) {
988
		memcpy(rad, port->parent->rad, idx);
989
		shift = (lct % 2) ? 4 : 0;
990
	} else
991
		rad[0] = 0;
992
 
993
	rad[idx] |= port->port_num << shift;
994
	return lct + 1;
995
}
996
 
997
/*
998
 * return sends link address for new mstb
999
 */
1000
static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port)
1001
{
1002
	int ret;
1003
	u8 rad[6], lct;
1004
	bool send_link = false;
1005
	switch (port->pdt) {
1006
	case DP_PEER_DEVICE_DP_LEGACY_CONV:
1007
	case DP_PEER_DEVICE_SST_SINK:
1008
		/* add i2c over sideband */
1009
		ret = drm_dp_mst_register_i2c_bus(&port->aux);
1010
		break;
1011
	case DP_PEER_DEVICE_MST_BRANCHING:
1012
		lct = drm_dp_calculate_rad(port, rad);
1013
 
1014
		port->mstb = drm_dp_add_mst_branch_device(lct, rad);
1015
		port->mstb->mgr = port->mgr;
1016
		port->mstb->port_parent = port;
1017
 
1018
		send_link = true;
1019
		break;
1020
	}
1021
	return send_link;
1022
}
1023
 
1024
static void drm_dp_check_port_guid(struct drm_dp_mst_branch *mstb,
1025
				   struct drm_dp_mst_port *port)
1026
{
1027
	int ret;
1028
	if (port->dpcd_rev >= 0x12) {
1029
		port->guid_valid = drm_dp_validate_guid(mstb->mgr, port->guid);
1030
		if (!port->guid_valid) {
1031
			ret = drm_dp_send_dpcd_write(mstb->mgr,
1032
						     port,
1033
						     DP_GUID,
1034
						     16, port->guid);
1035
			port->guid_valid = true;
1036
		}
1037
	}
1038
}
1039
 
6084 serge 1040
static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
1041
				int pnum,
5271 serge 1042
				char *proppath,
1043
				size_t proppath_size)
5060 serge 1044
{
1045
	int i;
1046
	char temp[8];
5271 serge 1047
	snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
5060 serge 1048
	for (i = 0; i < (mstb->lct - 1); i++) {
1049
		int shift = (i % 2) ? 0 : 4;
1050
		int port_num = mstb->rad[i / 2] >> shift;
5271 serge 1051
		snprintf(temp, sizeof(temp), "-%d", port_num);
1052
		strlcat(proppath, temp, proppath_size);
5060 serge 1053
	}
6084 serge 1054
	snprintf(temp, sizeof(temp), "-%d", pnum);
5271 serge 1055
	strlcat(proppath, temp, proppath_size);
5060 serge 1056
}
1057
 
1058
static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
1059
			    struct device *dev,
1060
			    struct drm_dp_link_addr_reply_port *port_msg)
1061
{
1062
	struct drm_dp_mst_port *port;
1063
	bool ret;
1064
	bool created = false;
1065
	int old_pdt = 0;
1066
	int old_ddps = 0;
1067
	port = drm_dp_get_port(mstb, port_msg->port_number);
1068
	if (!port) {
1069
		port = kzalloc(sizeof(*port), GFP_KERNEL);
1070
		if (!port)
1071
			return;
1072
		kref_init(&port->kref);
1073
		port->parent = mstb;
1074
		port->port_num = port_msg->port_number;
1075
		port->mgr = mstb->mgr;
1076
		port->aux.name = "DPMST";
1077
		port->aux.dev = dev;
1078
		created = true;
1079
	} else {
1080
		old_pdt = port->pdt;
1081
		old_ddps = port->ddps;
1082
	}
1083
 
1084
	port->pdt = port_msg->peer_device_type;
1085
	port->input = port_msg->input_port;
1086
	port->mcs = port_msg->mcs;
1087
	port->ddps = port_msg->ddps;
1088
	port->ldps = port_msg->legacy_device_plug_status;
1089
	port->dpcd_rev = port_msg->dpcd_revision;
1090
	port->num_sdp_streams = port_msg->num_sdp_streams;
1091
	port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;
1092
	memcpy(port->guid, port_msg->peer_guid, 16);
1093
 
1094
	/* manage mstb port lists with mgr lock - take a reference
1095
	   for this list */
1096
	if (created) {
1097
		mutex_lock(&mstb->mgr->lock);
1098
		kref_get(&port->kref);
1099
		list_add(&port->next, &mstb->ports);
1100
		mutex_unlock(&mstb->mgr->lock);
1101
	}
1102
 
1103
	if (old_ddps != port->ddps) {
1104
		if (port->ddps) {
1105
			drm_dp_check_port_guid(mstb, port);
1106
			if (!port->input)
1107
				drm_dp_send_enum_path_resources(mstb->mgr, mstb, port);
1108
		} else {
1109
			port->guid_valid = false;
1110
			port->available_pbn = 0;
1111
			}
1112
	}
1113
 
1114
	if (old_pdt != port->pdt && !port->input) {
1115
		drm_dp_port_teardown_pdt(port, old_pdt);
1116
 
1117
		ret = drm_dp_port_setup_pdt(port);
6084 serge 1118
		if (ret == true)
5060 serge 1119
			drm_dp_send_link_address(mstb->mgr, port->mstb);
1120
	}
1121
 
1122
	if (created && !port->input) {
1123
		char proppath[255];
6084 serge 1124
 
1125
		build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath));
5060 serge 1126
		port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath);
6084 serge 1127
		if (!port->connector) {
1128
			/* remove it from the port list */
1129
			mutex_lock(&mstb->mgr->lock);
1130
			list_del(&port->next);
1131
			mutex_unlock(&mstb->mgr->lock);
1132
			/* drop port list reference */
1133
			drm_dp_put_port(port);
1134
			goto out;
1135
		}
1136
		if (port->port_num >= DP_MST_LOGICAL_PORT_0) {
5271 serge 1137
			port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
6084 serge 1138
			drm_mode_connector_set_tile_property(port->connector);
5271 serge 1139
		}
6084 serge 1140
		(*mstb->mgr->cbs->register_connector)(port->connector);
5060 serge 1141
	}
1142
 
6084 serge 1143
out:
5060 serge 1144
	/* put reference to this port */
1145
	drm_dp_put_port(port);
1146
}
1147
 
1148
static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
1149
			       struct drm_dp_connection_status_notify *conn_stat)
1150
{
1151
	struct drm_dp_mst_port *port;
1152
	int old_pdt;
1153
	int old_ddps;
1154
	bool dowork = false;
1155
	port = drm_dp_get_port(mstb, conn_stat->port_number);
1156
	if (!port)
1157
		return;
1158
 
1159
	old_ddps = port->ddps;
1160
	old_pdt = port->pdt;
1161
	port->pdt = conn_stat->peer_device_type;
1162
	port->mcs = conn_stat->message_capability_status;
1163
	port->ldps = conn_stat->legacy_device_plug_status;
1164
	port->ddps = conn_stat->displayport_device_plug_status;
1165
 
1166
	if (old_ddps != port->ddps) {
1167
		if (port->ddps) {
1168
			drm_dp_check_port_guid(mstb, port);
1169
			dowork = true;
1170
		} else {
1171
			port->guid_valid = false;
1172
			port->available_pbn = 0;
1173
		}
1174
	}
1175
	if (old_pdt != port->pdt && !port->input) {
1176
		drm_dp_port_teardown_pdt(port, old_pdt);
1177
 
1178
		if (drm_dp_port_setup_pdt(port))
1179
			dowork = true;
1180
	}
1181
 
1182
	drm_dp_put_port(port);
1183
//   if (dowork)
1184
//       queue_work(system_long_wq, &mstb->mgr->work);
1185
 
1186
}
1187
 
1188
static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr,
1189
							       u8 lct, u8 *rad)
1190
{
1191
	struct drm_dp_mst_branch *mstb;
1192
	struct drm_dp_mst_port *port;
1193
	int i;
1194
	/* find the port by iterating down */
6084 serge 1195
 
1196
	mutex_lock(&mgr->lock);
5060 serge 1197
	mstb = mgr->mst_primary;
1198
 
1199
	for (i = 0; i < lct - 1; i++) {
1200
		int shift = (i % 2) ? 0 : 4;
1201
		int port_num = rad[i / 2] >> shift;
1202
 
1203
		list_for_each_entry(port, &mstb->ports, next) {
1204
			if (port->port_num == port_num) {
6084 serge 1205
				mstb = port->mstb;
1206
				if (!mstb) {
5060 serge 1207
					DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x\n", lct, rad[0]);
6084 serge 1208
					goto out;
5060 serge 1209
				}
1210
 
1211
				break;
1212
			}
1213
		}
1214
	}
1215
	kref_get(&mstb->kref);
6084 serge 1216
out:
1217
	mutex_unlock(&mgr->lock);
5060 serge 1218
	return mstb;
1219
}
1220
 
1221
static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
1222
					       struct drm_dp_mst_branch *mstb)
1223
{
1224
	struct drm_dp_mst_port *port;
6084 serge 1225
	struct drm_dp_mst_branch *mstb_child;
1226
	if (!mstb->link_address_sent)
1227
		drm_dp_send_link_address(mgr, mstb);
5060 serge 1228
 
1229
	list_for_each_entry(port, &mstb->ports, next) {
1230
		if (port->input)
1231
			continue;
1232
 
1233
		if (!port->ddps)
1234
			continue;
1235
 
1236
		if (!port->available_pbn)
1237
			drm_dp_send_enum_path_resources(mgr, mstb, port);
1238
 
6084 serge 1239
		if (port->mstb) {
1240
			mstb_child = drm_dp_get_validated_mstb_ref(mgr, port->mstb);
1241
			if (mstb_child) {
1242
				drm_dp_check_and_send_link_address(mgr, mstb_child);
1243
				drm_dp_put_mst_branch_device(mstb_child);
1244
			}
1245
		}
5060 serge 1246
	}
1247
}
1248
 
1249
static void drm_dp_mst_link_probe_work(struct work_struct *work)
1250
{
1251
	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, work);
6084 serge 1252
	struct drm_dp_mst_branch *mstb;
5060 serge 1253
 
6084 serge 1254
	mutex_lock(&mgr->lock);
1255
	mstb = mgr->mst_primary;
1256
	if (mstb) {
1257
		kref_get(&mstb->kref);
1258
	}
1259
	mutex_unlock(&mgr->lock);
1260
	if (mstb) {
1261
		drm_dp_check_and_send_link_address(mgr, mstb);
1262
		drm_dp_put_mst_branch_device(mstb);
1263
	}
5060 serge 1264
}
1265
 
1266
static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
1267
				 u8 *guid)
1268
{
1269
	static u8 zero_guid[16];
1270
 
1271
	if (!memcmp(guid, zero_guid, 16)) {
1272
		u64 salt = get_jiffies_64();
1273
		memcpy(&guid[0], &salt, sizeof(u64));
1274
		memcpy(&guid[8], &salt, sizeof(u64));
1275
		return false;
1276
	}
1277
	return true;
1278
}
1279
 
1280
#if 0
1281
static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes)
1282
{
1283
	struct drm_dp_sideband_msg_req_body req;
1284
 
1285
	req.req_type = DP_REMOTE_DPCD_READ;
1286
	req.u.dpcd_read.port_number = port_num;
1287
	req.u.dpcd_read.dpcd_address = offset;
1288
	req.u.dpcd_read.num_bytes = num_bytes;
1289
	drm_dp_encode_sideband_req(&req, msg);
1290
 
1291
	return 0;
1292
}
1293
#endif
1294
 
1295
static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
1296
				    bool up, u8 *msg, int len)
1297
{
1298
	int ret;
1299
	int regbase = up ? DP_SIDEBAND_MSG_UP_REP_BASE : DP_SIDEBAND_MSG_DOWN_REQ_BASE;
1300
	int tosend, total, offset;
1301
	int retries = 0;
1302
 
1303
retry:
1304
	total = len;
1305
	offset = 0;
1306
	do {
1307
		tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total);
1308
 
1309
		ret = drm_dp_dpcd_write(mgr->aux, regbase + offset,
1310
					&msg[offset],
1311
					tosend);
1312
		if (ret != tosend) {
1313
			if (ret == -EIO && retries < 5) {
1314
				retries++;
1315
				goto retry;
1316
			}
1317
			DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret);
1318
 
1319
			return -EIO;
1320
		}
1321
		offset += tosend;
1322
		total -= tosend;
1323
	} while (total > 0);
1324
	return 0;
1325
}
1326
 
1327
static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
1328
				  struct drm_dp_sideband_msg_tx *txmsg)
1329
{
1330
	struct drm_dp_mst_branch *mstb = txmsg->dst;
1331
 
1332
	/* both msg slots are full */
1333
	if (txmsg->seqno == -1) {
1334
		if (mstb->tx_slots[0] && mstb->tx_slots[1]) {
1335
			DRM_DEBUG_KMS("%s: failed to find slot\n", __func__);
1336
			return -EAGAIN;
1337
		}
1338
		if (mstb->tx_slots[0] == NULL && mstb->tx_slots[1] == NULL) {
1339
			txmsg->seqno = mstb->last_seqno;
1340
			mstb->last_seqno ^= 1;
1341
		} else if (mstb->tx_slots[0] == NULL)
1342
			txmsg->seqno = 0;
1343
		else
1344
			txmsg->seqno = 1;
1345
		mstb->tx_slots[txmsg->seqno] = txmsg;
1346
	}
1347
	hdr->broadcast = 0;
1348
	hdr->path_msg = txmsg->path_msg;
1349
	hdr->lct = mstb->lct;
1350
	hdr->lcr = mstb->lct - 1;
1351
	if (mstb->lct > 1)
1352
		memcpy(hdr->rad, mstb->rad, mstb->lct / 2);
1353
	hdr->seqno = txmsg->seqno;
1354
	return 0;
1355
}
1356
/*
1357
 * process a single block of the next message in the sideband queue
1358
 */
1359
static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
1360
				   struct drm_dp_sideband_msg_tx *txmsg,
1361
				   bool up)
1362
{
1363
	u8 chunk[48];
1364
	struct drm_dp_sideband_msg_hdr hdr;
1365
	int len, space, idx, tosend;
1366
	int ret;
1367
 
1368
	memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr));
1369
 
1370
	if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED) {
1371
		txmsg->seqno = -1;
1372
		txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND;
1373
	}
1374
 
1375
	/* make hdr from dst mst - for replies use seqno
1376
	   otherwise assign one */
1377
	ret = set_hdr_from_dst_qlock(&hdr, txmsg);
1378
	if (ret < 0)
1379
		return ret;
1380
 
1381
	/* amount left to send in this message */
1382
	len = txmsg->cur_len - txmsg->cur_offset;
1383
 
1384
	/* 48 - sideband msg size - 1 byte for data CRC, x header bytes */
1385
	space = 48 - 1 - drm_dp_calc_sb_hdr_size(&hdr);
1386
 
1387
	tosend = min(len, space);
1388
	if (len == txmsg->cur_len)
1389
		hdr.somt = 1;
1390
	if (space >= len)
1391
		hdr.eomt = 1;
1392
 
1393
 
1394
	hdr.msg_len = tosend + 1;
1395
	drm_dp_encode_sideband_msg_hdr(&hdr, chunk, &idx);
1396
	memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend);
1397
	/* add crc at end */
1398
	drm_dp_crc_sideband_chunk_req(&chunk[idx], tosend);
1399
	idx += tosend + 1;
1400
 
1401
	ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
1402
	if (ret) {
1403
		DRM_DEBUG_KMS("sideband msg failed to send\n");
1404
		return ret;
1405
	}
1406
 
1407
	txmsg->cur_offset += tosend;
1408
	if (txmsg->cur_offset == txmsg->cur_len) {
1409
		txmsg->state = DRM_DP_SIDEBAND_TX_SENT;
1410
		return 1;
1411
	}
1412
	return 0;
1413
}
1414
 
1415
static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
1416
{
1417
	struct drm_dp_sideband_msg_tx *txmsg;
1418
	int ret;
1419
 
6084 serge 1420
	WARN_ON(!mutex_is_locked(&mgr->qlock));
1421
 
5060 serge 1422
	/* construct a chunk from the first msg in the tx_msg queue */
1423
	if (list_empty(&mgr->tx_msg_downq)) {
1424
		mgr->tx_down_in_progress = false;
1425
		return;
1426
	}
1427
	mgr->tx_down_in_progress = true;
1428
 
1429
	txmsg = list_first_entry(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next);
1430
	ret = process_single_tx_qlock(mgr, txmsg, false);
1431
	if (ret == 1) {
1432
		/* txmsg is sent it should be in the slots now */
1433
		list_del(&txmsg->next);
1434
	} else if (ret) {
1435
		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
1436
		list_del(&txmsg->next);
1437
		if (txmsg->seqno != -1)
1438
			txmsg->dst->tx_slots[txmsg->seqno] = NULL;
1439
		txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
1440
//       wake_up(&mgr->tx_waitq);
1441
	}
1442
	if (list_empty(&mgr->tx_msg_downq)) {
1443
		mgr->tx_down_in_progress = false;
1444
		return;
1445
	}
1446
}
1447
 
1448
/* called holding qlock */
1449
static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
1450
{
1451
	struct drm_dp_sideband_msg_tx *txmsg;
1452
	int ret;
1453
 
1454
	/* construct a chunk from the first msg in the tx_msg queue */
1455
	if (list_empty(&mgr->tx_msg_upq)) {
1456
		mgr->tx_up_in_progress = false;
1457
		return;
1458
	}
1459
 
1460
	txmsg = list_first_entry(&mgr->tx_msg_upq, struct drm_dp_sideband_msg_tx, next);
1461
	ret = process_single_tx_qlock(mgr, txmsg, true);
1462
	if (ret == 1) {
1463
		/* up txmsgs aren't put in slots - so free after we send it */
1464
		list_del(&txmsg->next);
1465
		kfree(txmsg);
1466
	} else if (ret)
1467
		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
1468
	mgr->tx_up_in_progress = true;
1469
}
1470
 
1471
static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
1472
				 struct drm_dp_sideband_msg_tx *txmsg)
1473
{
1474
	mutex_lock(&mgr->qlock);
1475
	list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
1476
	if (!mgr->tx_down_in_progress)
1477
		process_single_down_tx_qlock(mgr);
1478
	mutex_unlock(&mgr->qlock);
1479
}
1480
 
6084 serge 1481
static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
1482
				     struct drm_dp_mst_branch *mstb)
5060 serge 1483
{
1484
	int len;
1485
	struct drm_dp_sideband_msg_tx *txmsg;
1486
	int ret;
1487
 
1488
	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
1489
	if (!txmsg)
6084 serge 1490
		return;
5060 serge 1491
 
1492
	txmsg->dst = mstb;
1493
	len = build_link_address(txmsg);
1494
 
6084 serge 1495
	mstb->link_address_sent = true;
5060 serge 1496
	drm_dp_queue_down_tx(mgr, txmsg);
1497
 
1498
	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
1499
	if (ret > 0) {
1500
		int i;
1501
 
1502
		if (txmsg->reply.reply_type == 1)
1503
			DRM_DEBUG_KMS("link address nak received\n");
1504
		else {
1505
			DRM_DEBUG_KMS("link address reply: %d\n", txmsg->reply.u.link_addr.nports);
1506
			for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
1507
				DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n", i,
1508
				       txmsg->reply.u.link_addr.ports[i].input_port,
1509
				       txmsg->reply.u.link_addr.ports[i].peer_device_type,
1510
				       txmsg->reply.u.link_addr.ports[i].port_number,
1511
				       txmsg->reply.u.link_addr.ports[i].dpcd_revision,
1512
				       txmsg->reply.u.link_addr.ports[i].mcs,
1513
				       txmsg->reply.u.link_addr.ports[i].ddps,
1514
				       txmsg->reply.u.link_addr.ports[i].legacy_device_plug_status,
1515
				       txmsg->reply.u.link_addr.ports[i].num_sdp_streams,
1516
				       txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks);
1517
			}
1518
			for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
1519
				drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]);
1520
			}
1521
			(*mgr->cbs->hotplug)(mgr);
1522
		}
6084 serge 1523
	} else {
1524
		mstb->link_address_sent = false;
5060 serge 1525
		DRM_DEBUG_KMS("link address failed %d\n", ret);
6084 serge 1526
	}
5060 serge 1527
 
1528
	kfree(txmsg);
1529
}
1530
 
1531
static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
1532
					   struct drm_dp_mst_branch *mstb,
1533
					   struct drm_dp_mst_port *port)
1534
{
1535
	int len;
1536
	struct drm_dp_sideband_msg_tx *txmsg;
1537
	int ret;
1538
 
1539
	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
1540
	if (!txmsg)
1541
		return -ENOMEM;
1542
 
1543
	txmsg->dst = mstb;
1544
	len = build_enum_path_resources(txmsg, port->port_num);
1545
 
1546
	drm_dp_queue_down_tx(mgr, txmsg);
1547
 
1548
	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
1549
	if (ret > 0) {
1550
		if (txmsg->reply.reply_type == 1)
1551
			DRM_DEBUG_KMS("enum path resources nak received\n");
1552
		else {
1553
			if (port->port_num != txmsg->reply.u.path_resources.port_number)
1554
				DRM_ERROR("got incorrect port in response\n");
1555
			DRM_DEBUG_KMS("enum path resources %d: %d %d\n", txmsg->reply.u.path_resources.port_number, txmsg->reply.u.path_resources.full_payload_bw_number,
1556
			       txmsg->reply.u.path_resources.avail_payload_bw_number);
1557
			port->available_pbn = txmsg->reply.u.path_resources.avail_payload_bw_number;
1558
		}
1559
	}
1560
 
1561
	kfree(txmsg);
1562
	return 0;
1563
}
1564
 
1565
static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
1566
				   struct drm_dp_mst_port *port,
1567
				   int id,
1568
				   int pbn)
1569
{
1570
	struct drm_dp_sideband_msg_tx *txmsg;
1571
	struct drm_dp_mst_branch *mstb;
1572
	int len, ret;
1573
 
1574
	mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
1575
	if (!mstb)
1576
		return -EINVAL;
1577
 
1578
	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
1579
	if (!txmsg) {
1580
		ret = -ENOMEM;
1581
		goto fail_put;
1582
	}
1583
 
1584
	txmsg->dst = mstb;
1585
	len = build_allocate_payload(txmsg, port->port_num,
1586
				     id,
1587
				     pbn);
1588
 
1589
	drm_dp_queue_down_tx(mgr, txmsg);
1590
 
1591
	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
1592
	if (ret > 0) {
1593
		if (txmsg->reply.reply_type == 1) {
1594
			ret = -EINVAL;
1595
		} else
1596
			ret = 0;
1597
	}
1598
	kfree(txmsg);
1599
fail_put:
1600
	drm_dp_put_mst_branch_device(mstb);
1601
	return ret;
1602
}
1603
 
1604
static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
1605
				       int id,
1606
				       struct drm_dp_payload *payload)
1607
{
1608
	int ret;
1609
 
1610
	ret = drm_dp_dpcd_write_payload(mgr, id, payload);
1611
	if (ret < 0) {
1612
		payload->payload_state = 0;
1613
		return ret;
1614
	}
1615
	payload->payload_state = DP_PAYLOAD_LOCAL;
1616
	return 0;
1617
}
1618
 
1619
static int drm_dp_create_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
1620
				       struct drm_dp_mst_port *port,
1621
				       int id,
1622
				       struct drm_dp_payload *payload)
1623
{
1624
	int ret;
1625
	ret = drm_dp_payload_send_msg(mgr, port, id, port->vcpi.pbn);
1626
	if (ret < 0)
1627
		return ret;
1628
	payload->payload_state = DP_PAYLOAD_REMOTE;
1629
	return ret;
1630
}
1631
 
1632
static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
1633
					struct drm_dp_mst_port *port,
1634
					int id,
1635
					struct drm_dp_payload *payload)
1636
{
1637
	DRM_DEBUG_KMS("\n");
1638
	/* its okay for these to fail */
1639
	if (port) {
1640
		drm_dp_payload_send_msg(mgr, port, id, 0);
1641
	}
1642
 
1643
	drm_dp_dpcd_write_payload(mgr, id, payload);
5271 serge 1644
	payload->payload_state = DP_PAYLOAD_DELETE_LOCAL;
5060 serge 1645
	return 0;
1646
}
1647
 
1648
static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
1649
					int id,
1650
					struct drm_dp_payload *payload)
1651
{
1652
	payload->payload_state = 0;
1653
	return 0;
1654
}
1655
 
1656
/**
1657
 * drm_dp_update_payload_part1() - Execute payload update part 1
1658
 * @mgr: manager to use.
1659
 *
1660
 * This iterates over all proposed virtual channels, and tries to
1661
 * allocate space in the link for them. For 0->slots transitions,
1662
 * this step just writes the VCPI to the MST device. For slots->0
1663
 * transitions, this writes the updated VCPIs and removes the
1664
 * remote VC payloads.
1665
 *
1666
 * after calling this the driver should generate ACT and payload
1667
 * packets.
1668
 */
1669
int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
1670
{
5271 serge 1671
	int i, j;
5060 serge 1672
	int cur_slots = 1;
1673
	struct drm_dp_payload req_payload;
1674
	struct drm_dp_mst_port *port;
1675
 
1676
	mutex_lock(&mgr->payload_lock);
1677
	for (i = 0; i < mgr->max_payloads; i++) {
1678
		/* solve the current payloads - compare to the hw ones
1679
		   - update the hw view */
1680
		req_payload.start_slot = cur_slots;
1681
		if (mgr->proposed_vcpis[i]) {
1682
			port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
1683
			req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots;
1684
		} else {
1685
			port = NULL;
1686
			req_payload.num_slots = 0;
1687
		}
5271 serge 1688
 
1689
		if (mgr->payloads[i].start_slot != req_payload.start_slot) {
1690
			mgr->payloads[i].start_slot = req_payload.start_slot;
1691
		}
5060 serge 1692
		/* work out what is required to happen with this payload */
5271 serge 1693
		if (mgr->payloads[i].num_slots != req_payload.num_slots) {
5060 serge 1694
 
1695
			/* need to push an update for this payload */
1696
			if (req_payload.num_slots) {
5271 serge 1697
				drm_dp_create_payload_step1(mgr, mgr->proposed_vcpis[i]->vcpi, &req_payload);
5060 serge 1698
				mgr->payloads[i].num_slots = req_payload.num_slots;
1699
			} else if (mgr->payloads[i].num_slots) {
1700
				mgr->payloads[i].num_slots = 0;
5271 serge 1701
				drm_dp_destroy_payload_step1(mgr, port, port->vcpi.vcpi, &mgr->payloads[i]);
5060 serge 1702
				req_payload.payload_state = mgr->payloads[i].payload_state;
5271 serge 1703
				mgr->payloads[i].start_slot = 0;
1704
			}
5060 serge 1705
			mgr->payloads[i].payload_state = req_payload.payload_state;
1706
		}
1707
		cur_slots += req_payload.num_slots;
1708
	}
5271 serge 1709
 
1710
	for (i = 0; i < mgr->max_payloads; i++) {
1711
		if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
1712
			DRM_DEBUG_KMS("removing payload %d\n", i);
1713
			for (j = i; j < mgr->max_payloads - 1; j++) {
1714
				memcpy(&mgr->payloads[j], &mgr->payloads[j + 1], sizeof(struct drm_dp_payload));
1715
				mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1];
1716
				if (mgr->proposed_vcpis[j] && mgr->proposed_vcpis[j]->num_slots) {
1717
					set_bit(j + 1, &mgr->payload_mask);
1718
				} else {
1719
					clear_bit(j + 1, &mgr->payload_mask);
1720
				}
1721
			}
1722
			memset(&mgr->payloads[mgr->max_payloads - 1], 0, sizeof(struct drm_dp_payload));
1723
			mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL;
1724
			clear_bit(mgr->max_payloads, &mgr->payload_mask);
1725
 
1726
		}
1727
	}
5060 serge 1728
	mutex_unlock(&mgr->payload_lock);
1729
 
1730
	return 0;
1731
}
1732
EXPORT_SYMBOL(drm_dp_update_payload_part1);
1733
 
1734
/**
1735
 * drm_dp_update_payload_part2() - Execute payload update part 2
1736
 * @mgr: manager to use.
1737
 *
1738
 * This iterates over all proposed virtual channels, and tries to
1739
 * allocate space in the link for them. For 0->slots transitions,
1740
 * this step writes the remote VC payload commands. For slots->0
1741
 * this just resets some internal state.
1742
 */
1743
int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr)
1744
{
1745
	struct drm_dp_mst_port *port;
1746
	int i;
1747
	int ret = 0;
1748
	mutex_lock(&mgr->payload_lock);
1749
	for (i = 0; i < mgr->max_payloads; i++) {
1750
 
1751
		if (!mgr->proposed_vcpis[i])
1752
			continue;
1753
 
1754
		port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
1755
 
1756
		DRM_DEBUG_KMS("payload %d %d\n", i, mgr->payloads[i].payload_state);
1757
		if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) {
5271 serge 1758
			ret = drm_dp_create_payload_step2(mgr, port, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
5060 serge 1759
		} else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
5271 serge 1760
			ret = drm_dp_destroy_payload_step2(mgr, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
5060 serge 1761
		}
1762
		if (ret) {
1763
			mutex_unlock(&mgr->payload_lock);
1764
			return ret;
1765
		}
1766
	}
1767
	mutex_unlock(&mgr->payload_lock);
1768
	return 0;
1769
}
1770
EXPORT_SYMBOL(drm_dp_update_payload_part2);
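
/* Example only (not built): a rough sketch of the order in which a driver
 * would normally run the two-part payload update once VCPIs are allocated.
 * The sequence follows the comments above; the surrounding function is
 * hypothetical.
 */
#if 0 /* illustrative sketch, not compiled */
static int example_commit_mst_payloads(struct drm_dp_mst_topology_mgr *mgr)
{
	int ret;

	/* part 1: write the updated VCPI/payload table to the branch device */
	ret = drm_dp_update_payload_part1(mgr);
	if (ret)
		return ret;

	/* the driver then triggers ACT generation in its own transport
	 * hardware and waits for the sink to report ACT handled */
	ret = drm_dp_check_act_status(mgr);
	if (ret)
		return ret;

	/* part 2: send the remote payload allocation messages */
	return drm_dp_update_payload_part2(mgr);
}
#endif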
1771
 
1772
#if 0 /* unused as of yet */
1773
static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
1774
				 struct drm_dp_mst_port *port,
1775
				 int offset, int size)
1776
{
1777
	int len;
1778
	struct drm_dp_sideband_msg_tx *txmsg;
1779
 
1780
	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
1781
	if (!txmsg)
1782
		return -ENOMEM;
1783
 
1784
	len = build_dpcd_read(txmsg, port->port_num, 0, 8);
1785
	txmsg->dst = port->parent;
1786
 
1787
	drm_dp_queue_down_tx(mgr, txmsg);
1788
 
1789
	return 0;
1790
}
1791
#endif
1792
 
1793
static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
1794
				  struct drm_dp_mst_port *port,
1795
				  int offset, int size, u8 *bytes)
1796
{
1797
	int len;
1798
	int ret;
1799
	struct drm_dp_sideband_msg_tx *txmsg;
1800
	struct drm_dp_mst_branch *mstb;
1801
 
1802
	mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
1803
	if (!mstb)
1804
		return -EINVAL;
1805
 
1806
	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
1807
	if (!txmsg) {
1808
		ret = -ENOMEM;
1809
		goto fail_put;
1810
	}
1811
 
1812
	len = build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
1813
	txmsg->dst = mstb;
1814
 
1815
	drm_dp_queue_down_tx(mgr, txmsg);
1816
 
1817
	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
1818
	if (ret > 0) {
1819
		if (txmsg->reply.reply_type == 1) {
1820
			ret = -EINVAL;
1821
		} else
1822
			ret = 0;
1823
	}
1824
	kfree(txmsg);
1825
fail_put:
1826
	drm_dp_put_mst_branch_device(mstb);
1827
	return ret;
1828
}
1829
 
1830
static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type)
1831
{
1832
	struct drm_dp_sideband_msg_reply_body reply;
1833
 
1834
	reply.reply_type = 1;
1835
	reply.req_type = req_type;
1836
	drm_dp_encode_sideband_reply(&reply, msg);
1837
	return 0;
1838
}
1839
 
1840
static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
1841
				    struct drm_dp_mst_branch *mstb,
1842
				    int req_type, int seqno, bool broadcast)
1843
{
1844
	struct drm_dp_sideband_msg_tx *txmsg;
1845
 
1846
	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
1847
	if (!txmsg)
1848
		return -ENOMEM;
1849
 
1850
	txmsg->dst = mstb;
1851
	txmsg->seqno = seqno;
1852
	drm_dp_encode_up_ack_reply(txmsg, req_type);
1853
 
1854
	mutex_lock(&mgr->qlock);
1855
	list_add_tail(&txmsg->next, &mgr->tx_msg_upq);
1856
	if (!mgr->tx_up_in_progress) {
1857
		process_single_up_tx_qlock(mgr);
1858
	}
1859
	mutex_unlock(&mgr->qlock);
1860
	return 0;
1861
}
1862
 
5271 serge 1863
static bool drm_dp_get_vc_payload_bw(int dp_link_bw,
1864
				     int dp_link_count,
1865
				     int *out)
5060 serge 1866
{
1867
	switch (dp_link_bw) {
5271 serge 1868
	default:
1869
		DRM_DEBUG_KMS("invalid link bandwidth in DPCD: %x (link count: %d)\n",
1870
			      dp_link_bw, dp_link_count);
1871
		return false;
1872
 
5060 serge 1873
	case DP_LINK_BW_1_62:
5271 serge 1874
		*out = 3 * dp_link_count;
1875
		break;
5060 serge 1876
	case DP_LINK_BW_2_7:
5271 serge 1877
		*out = 5 * dp_link_count;
1878
		break;
5060 serge 1879
	case DP_LINK_BW_5_4:
5271 serge 1880
		*out = 10 * dp_link_count;
1881
		break;
5060 serge 1882
	}
5271 serge 1883
	return true;
5060 serge 1884
}
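
/* Where the divisors above come from (informal derivation, assuming the
 * usual DP 1.2 figures): one PBN corresponds to 54/64 MBytes/sec and the
 * link is divided into 64 MTP time slots.  With 8b/10b coding a single
 * 1.62 Gbps lane carries 162 MBytes/sec, so one time slot carries
 * 162/64 MBytes/sec = (162/54) PBN = 3 PBN.  The same arithmetic gives
 * 5 PBN for 2.7 Gbps and 10 PBN for 5.4 Gbps, scaled by the lane count.
 */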
1885
 
1886
/**
1887
 * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager
1888
 * @mgr: manager to set state for
1889
 * @mst_state: true to enable MST on this connector - false to disable.
1890
 *
1891
 * This is called by the driver when it detects an MST capable device plugged
1892
 * into a DP MST capable port, or when a DP MST capable device is unplugged.
1893
 */
1894
int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
1895
{
1896
	int ret = 0;
1897
	struct drm_dp_mst_branch *mstb = NULL;
1898
 
1899
	mutex_lock(&mgr->lock);
1900
	if (mst_state == mgr->mst_state)
1901
		goto out_unlock;
1902
 
1903
	mgr->mst_state = mst_state;
1904
	/* set the device into MST mode */
1905
	if (mst_state) {
1906
		WARN_ON(mgr->mst_primary);
1907
 
1908
		/* get dpcd info */
1909
		ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
1910
		if (ret != DP_RECEIVER_CAP_SIZE) {
1911
			DRM_DEBUG_KMS("failed to read DPCD\n");
1912
			goto out_unlock;
1913
		}
1914
 
5271 serge 1915
		if (!drm_dp_get_vc_payload_bw(mgr->dpcd[1],
1916
					      mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK,
1917
					      &mgr->pbn_div)) {
1918
			ret = -EINVAL;
1919
			goto out_unlock;
1920
		}
1921
 
5060 serge 1922
		mgr->total_pbn = 2560;
1923
		mgr->total_slots = DIV_ROUND_UP(mgr->total_pbn, mgr->pbn_div);
1924
		mgr->avail_slots = mgr->total_slots;
1925
 
1926
		/* add initial branch device at LCT 1 */
1927
		mstb = drm_dp_add_mst_branch_device(1, NULL);
1928
		if (mstb == NULL) {
1929
			ret = -ENOMEM;
1930
			goto out_unlock;
1931
		}
1932
		mstb->mgr = mgr;
1933
 
1934
		/* give this the main reference */
1935
		mgr->mst_primary = mstb;
1936
		kref_get(&mgr->mst_primary->kref);
1937
 
1938
		{
1939
			struct drm_dp_payload reset_pay;
1940
			reset_pay.start_slot = 0;
1941
			reset_pay.num_slots = 0x3f;
1942
			drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
1943
		}
1944
 
1945
		ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
1946
					 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
1947
		if (ret < 0) {
1948
			goto out_unlock;
1949
		}
1950
 
1951
 
1952
		/* sort out guid */
1953
		ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, mgr->guid, 16);
1954
		if (ret != 16) {
1955
			DRM_DEBUG_KMS("failed to read DP GUID %d\n", ret);
1956
			goto out_unlock;
1957
		}
1958
 
1959
		mgr->guid_valid = drm_dp_validate_guid(mgr, mgr->guid);
1960
		if (!mgr->guid_valid) {
1961
			ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, mgr->guid, 16);
1962
			mgr->guid_valid = true;
1963
		}
1964
 
1965
//       queue_work(system_long_wq, &mgr->work);
1966
 
1967
		ret = 0;
1968
	} else {
1969
		/* disable MST on the device */
1970
		mstb = mgr->mst_primary;
1971
		mgr->mst_primary = NULL;
1972
		/* this can fail if the device is gone */
1973
		drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
1974
		ret = 0;
1975
		memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload));
1976
		mgr->payload_mask = 0;
1977
		set_bit(0, &mgr->payload_mask);
5271 serge 1978
		mgr->vcpi_mask = 0;
5060 serge 1979
	}
1980
 
1981
out_unlock:
1982
	mutex_unlock(&mgr->lock);
1983
	if (mstb)
1984
		drm_dp_put_mst_branch_device(mstb);
1985
	return ret;
1986
 
1987
}
1988
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst);
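
/* Example only (not built): how a driver might flip MST mode on and off
 * around sink detection.  The is_mst_sink flag and the surrounding function
 * are hypothetical; only the drm_dp_mst_topology_mgr_set_mst() calls are
 * real helpers from this file.
 */
#if 0 /* illustrative sketch, not compiled */
static void example_detect_mst(struct drm_dp_mst_topology_mgr *mgr,
			       bool is_mst_sink)
{
	if (is_mst_sink) {
		if (drm_dp_mst_topology_mgr_set_mst(mgr, true))
			DRM_DEBUG_KMS("failed to enable MST mode\n");
	} else {
		drm_dp_mst_topology_mgr_set_mst(mgr, false);
	}
}
#endif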
1989
 
1990
/**
1991
 * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
1992
 * @mgr: manager to suspend
1993
 *
1994
 * This function tells the MST device that we can't handle UP messages
1995
 * anymore. This should stop it from sending any since we are suspended.
1996
 */
1997
void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
1998
{
1999
	mutex_lock(&mgr->lock);
2000
	drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
2001
			   DP_MST_EN | DP_UPSTREAM_IS_SRC);
2002
	mutex_unlock(&mgr->lock);
2003
}
2004
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
2005
 
2006
/**
2007
 * drm_dp_mst_topology_mgr_resume() - resume the MST manager
2008
 * @mgr: manager to resume
2009
 *
2010
 * This will fetch the DPCD and see if the device is still there.
2011
 * If it is, it will rewrite the MSTM control bits and return.
2012
 *
2013
 * If the device is no longer there, this returns -1 and the driver should do
2014
 * a full MST reprobe, in case we were undocked.
2015
 */
2016
int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
2017
{
2018
	int ret = 0;
2019
 
2020
	mutex_lock(&mgr->lock);
2021
 
2022
	if (mgr->mst_primary) {
2023
		int sret;
2024
		sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
2025
		if (sret != DP_RECEIVER_CAP_SIZE) {
2026
			DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
2027
			ret = -1;
2028
			goto out_unlock;
2029
		}
2030
 
2031
		ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
2032
					 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
2033
		if (ret < 0) {
2034
			DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n");
2035
			ret = -1;
2036
			goto out_unlock;
2037
		}
2038
		ret = 0;
2039
	} else
2040
		ret = -1;
2041
 
2042
out_unlock:
2043
	mutex_unlock(&mgr->lock);
2044
	return ret;
2045
}
2046
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
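
/* Example only (not built): a sketch of system suspend/resume hooks built on
 * the two helpers above.  The example_* wrappers are hypothetical; the point
 * is that a negative return from drm_dp_mst_topology_mgr_resume() means the
 * topology should be treated as gone and reprobed from scratch.
 */
#if 0 /* illustrative sketch, not compiled */
static void example_suspend(struct drm_dp_mst_topology_mgr *mgr)
{
	drm_dp_mst_topology_mgr_suspend(mgr);
}

static void example_resume(struct drm_dp_mst_topology_mgr *mgr)
{
	if (drm_dp_mst_topology_mgr_resume(mgr) < 0) {
		/* device vanished (e.g. undocked) - drop the old topology
		 * and let the next detect cycle re-enable MST if needed */
		drm_dp_mst_topology_mgr_set_mst(mgr, false);
	}
}
#endif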
2047
 
2048
static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
2049
{
2050
	int len;
2051
	u8 replyblock[32];
2052
	int replylen, origlen, curreply;
2053
	int ret;
2054
	struct drm_dp_sideband_msg_rx *msg;
2055
	int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE : DP_SIDEBAND_MSG_DOWN_REP_BASE;
2056
	msg = up ? &mgr->up_req_recv : &mgr->down_rep_recv;
2057
 
2058
	len = min(mgr->max_dpcd_transaction_bytes, 16);
2059
	ret = drm_dp_dpcd_read(mgr->aux, basereg,
2060
			       replyblock, len);
2061
	if (ret != len) {
2062
		DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret);
2063
		return;
2064
	}
2065
	ret = drm_dp_sideband_msg_build(msg, replyblock, len, true);
2066
	if (!ret) {
2067
		DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
2068
		return;
2069
	}
2070
	replylen = msg->curchunk_len + msg->curchunk_hdrlen;
2071
 
2072
	origlen = replylen;
2073
	replylen -= len;
2074
	curreply = len;
2075
	while (replylen > 0) {
2076
		len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
2077
		ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
2078
				    replyblock, len);
2079
		if (ret != len) {
2080
			DRM_DEBUG_KMS("failed to read a chunk\n");
2081
		}
2082
		ret = drm_dp_sideband_msg_build(msg, replyblock, len, false);
2083
		if (!ret)
2084
			DRM_DEBUG_KMS("failed to build sideband msg\n");
2085
		curreply += len;
2086
		replylen -= len;
2087
	}
2088
}
2089
 
2090
static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
2091
{
2092
	int ret = 0;
2093
 
2094
	drm_dp_get_one_sb_msg(mgr, false);
2095
 
2096
	if (mgr->down_rep_recv.have_eomt) {
2097
		struct drm_dp_sideband_msg_tx *txmsg;
2098
		struct drm_dp_mst_branch *mstb;
2099
		int slot = -1;
2100
		mstb = drm_dp_get_mst_branch_device(mgr,
2101
						    mgr->down_rep_recv.initial_hdr.lct,
2102
						    mgr->down_rep_recv.initial_hdr.rad);
2103
 
2104
		if (!mstb) {
2105
			DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->down_rep_recv.initial_hdr.lct);
2106
			memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2107
			return 0;
2108
		}
2109
 
2110
		/* find the message */
2111
		slot = mgr->down_rep_recv.initial_hdr.seqno;
2112
		mutex_lock(&mgr->qlock);
2113
		txmsg = mstb->tx_slots[slot];
2114
		/* remove from slots */
2115
		mutex_unlock(&mgr->qlock);
2116
 
2117
		if (!txmsg) {
2118
			DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n",
2119
			       mstb,
2120
			       mgr->down_rep_recv.initial_hdr.seqno,
2121
			       mgr->down_rep_recv.initial_hdr.lct,
2122
				      mgr->down_rep_recv.initial_hdr.rad[0],
2123
				      mgr->down_rep_recv.msg[0]);
2124
			drm_dp_put_mst_branch_device(mstb);
2125
			memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2126
			return 0;
2127
		}
2128
 
2129
		drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply);
2130
		if (txmsg->reply.reply_type == 1) {
2131
			DRM_DEBUG_KMS("Got NAK reply: req 0x%02x, reason 0x%02x, nak data 0x%02x\n", txmsg->reply.req_type, txmsg->reply.u.nak.reason, txmsg->reply.u.nak.nak_data);
2132
		}
2133
 
2134
		memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2135
		drm_dp_put_mst_branch_device(mstb);
2136
 
2137
		mutex_lock(&mgr->qlock);
2138
		txmsg->state = DRM_DP_SIDEBAND_TX_RX;
2139
		mstb->tx_slots[slot] = NULL;
2140
		mutex_unlock(&mgr->qlock);
2141
 
2142
//       wake_up(&mgr->tx_waitq);
2143
	}
2144
	return ret;
2145
}
2146
 
2147
static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
2148
{
2149
	int ret = 0;
2150
	drm_dp_get_one_sb_msg(mgr, true);
2151
 
2152
	if (mgr->up_req_recv.have_eomt) {
2153
		struct drm_dp_sideband_msg_req_body msg;
2154
		struct drm_dp_mst_branch *mstb;
2155
		bool seqno;
2156
		mstb = drm_dp_get_mst_branch_device(mgr,
2157
						    mgr->up_req_recv.initial_hdr.lct,
2158
						    mgr->up_req_recv.initial_hdr.rad);
2159
		if (!mstb) {
2160
			DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
2161
			memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2162
			return 0;
2163
		}
2164
 
2165
		seqno = mgr->up_req_recv.initial_hdr.seqno;
2166
		drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg);
2167
 
2168
		if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
2169
			drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false);
2170
			drm_dp_update_port(mstb, &msg.u.conn_stat);
2171
			DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
2172
			(*mgr->cbs->hotplug)(mgr);
2173
 
2174
		} else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
2175
			drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false);
2176
			DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn);
2177
		}
2178
 
2179
		drm_dp_put_mst_branch_device(mstb);
2180
		memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2181
	}
2182
	return ret;
2183
}
2184
 
2185
/**
2186
 * drm_dp_mst_hpd_irq() - MST hotplug IRQ notify
2187
 * @mgr: manager to notify irq for.
2188
 * @esi: 4 bytes from SINK_COUNT_ESI
5271 serge 2189
 * @handled: whether the hpd interrupt was consumed or not
5060 serge 2190
 *
2191
 * This should be called from the driver when it detects a short IRQ,
2192
 * along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The
2193
 * topology manager will process the sideband messages received as a result
2194
 * of this.
2195
 */
2196
int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled)
2197
{
2198
	int ret = 0;
2199
	int sc;
2200
	*handled = false;
2201
	sc = esi[0] & 0x3f;
2202
 
2203
	if (sc != mgr->sink_count) {
2204
		mgr->sink_count = sc;
2205
		*handled = true;
2206
	}
2207
 
2208
	if (esi[1] & DP_DOWN_REP_MSG_RDY) {
2209
		ret = drm_dp_mst_handle_down_rep(mgr);
2210
		*handled = true;
2211
	}
2212
 
2213
	if (esi[1] & DP_UP_REQ_MSG_RDY) {
2214
		ret |= drm_dp_mst_handle_up_req(mgr);
2215
		*handled = true;
2216
	}
2217
 
2218
	drm_dp_mst_kick_tx(mgr);
2219
	return ret;
2220
}
2221
EXPORT_SYMBOL(drm_dp_mst_hpd_irq);
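
/* Example only (not built): roughly how a driver's short-pulse handler could
 * feed ESI data into drm_dp_mst_hpd_irq().  Reading DP_SINK_COUNT_ESI and
 * writing the serviced bits back to ack them mirrors what existing drivers
 * do, but the exact byte counts used here are illustrative assumptions.
 */
#if 0 /* illustrative sketch, not compiled */
static void example_mst_short_pulse(struct drm_dp_aux *aux,
				    struct drm_dp_mst_topology_mgr *mgr)
{
	u8 esi[16] = {};
	bool handled;

	if (drm_dp_dpcd_read(aux, DP_SINK_COUNT_ESI, esi, 14) != 14)
		return;

	drm_dp_mst_hpd_irq(mgr, esi, &handled);
	if (handled) {
		/* ack the IRQ bits we just serviced */
		drm_dp_dpcd_write(aux, DP_SINK_COUNT_ESI + 1, &esi[1], 3);
	}
}
#endif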
2222
 
2223
/**
2224
 * drm_dp_mst_detect_port() - get connection status for an MST port
2225
 * @mgr: manager for this port
2226
 * @port: unverified pointer to a port
2227
 *
2228
 * This returns the current connection state for a port. It validates the
2229
 * port pointer still exists, so the caller doesn't require a reference.
2230
 */
5271 serge 2231
enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector,
2232
						 struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
5060 serge 2233
{
2234
	enum drm_connector_status status = connector_status_disconnected;
2235
 
2236
	/* we need to search for the port in the mgr in case it's gone */
2237
	port = drm_dp_get_validated_port_ref(mgr, port);
2238
	if (!port)
2239
		return connector_status_disconnected;
2240
 
2241
	if (!port->ddps)
2242
		goto out;
2243
 
2244
	switch (port->pdt) {
2245
	case DP_PEER_DEVICE_NONE:
2246
	case DP_PEER_DEVICE_MST_BRANCHING:
2247
		break;
2248
 
2249
	case DP_PEER_DEVICE_SST_SINK:
2250
		status = connector_status_connected;
5271 serge 2251
		/* for logical ports - cache the EDID */
2252
		if (port->port_num >= 8 && !port->cached_edid) {
2253
			port->cached_edid = drm_get_edid(connector, &port->aux.ddc);
2254
		}
5060 serge 2255
		break;
2256
	case DP_PEER_DEVICE_DP_LEGACY_CONV:
2257
		if (port->ldps)
2258
			status = connector_status_connected;
2259
		break;
2260
	}
2261
out:
2262
	drm_dp_put_port(port);
2263
	return status;
2264
}
2265
EXPORT_SYMBOL(drm_dp_mst_detect_port);
2266
 
2267
/**
2268
 * drm_dp_mst_get_edid() - get EDID for an MST port
2269
 * @connector: toplevel connector to get EDID for
2270
 * @mgr: manager for this port
2271
 * @port: unverified pointer to a port.
2272
 *
2273
 * This returns an EDID for the port connected to a connector.
2274
 * It validates the pointer still exists so the caller doesn't require a
2275
 * reference.
2276
 */
2277
struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
2278
{
2279
	struct edid *edid = NULL;
2280
 
2281
	/* we need to search for the port in the mgr in case it's gone */
2282
	port = drm_dp_get_validated_port_ref(mgr, port);
2283
	if (!port)
2284
		return NULL;
2285
 
5271 serge 2286
	if (port->cached_edid)
2287
		edid = drm_edid_duplicate(port->cached_edid);
6084 serge 2288
	else {
2289
		edid = drm_get_edid(connector, &port->aux.ddc);
2290
		drm_mode_connector_set_tile_property(connector);
2291
	}
5060 serge 2292
	drm_dp_put_port(port);
2293
	return edid;
2294
}
2295
EXPORT_SYMBOL(drm_dp_mst_get_edid);
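
/* Example only (not built): how a per-port connector implementation might
 * sit on top of drm_dp_mst_detect_port() and drm_dp_mst_get_edid().  The
 * example_mst_connector wrapper struct is hypothetical.
 */
#if 0 /* illustrative sketch, not compiled */
struct example_mst_connector {
	struct drm_connector base;
	struct drm_dp_mst_topology_mgr *mgr;
	struct drm_dp_mst_port *port;	/* unverified; validated by the helpers */
};

static enum drm_connector_status
example_mst_detect(struct example_mst_connector *c)
{
	return drm_dp_mst_detect_port(&c->base, c->mgr, c->port);
}

static int example_mst_get_modes(struct example_mst_connector *c)
{
	struct edid *edid;
	int count;

	edid = drm_dp_mst_get_edid(&c->base, c->mgr, c->port);
	if (!edid)
		return 0;

	count = drm_add_edid_modes(&c->base, edid);
	kfree(edid);
	return count;
}
#endif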
2296
 
2297
/**
2298
 * drm_dp_find_vcpi_slots() - find slots for this PBN value
2299
 * @mgr: manager to use
2300
 * @pbn: payload bandwidth to convert into slots.
2301
 */
2302
int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
2303
			   int pbn)
2304
{
2305
	int num_slots;
2306
 
2307
	num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
2308
 
2309
	if (num_slots > mgr->avail_slots)
2310
		return -ENOSPC;
2311
	return num_slots;
2312
}
2313
EXPORT_SYMBOL(drm_dp_find_vcpi_slots);
2314
 
2315
static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
2316
			    struct drm_dp_vcpi *vcpi, int pbn)
2317
{
2318
	int num_slots;
2319
	int ret;
2320
 
2321
	num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
2322
 
2323
	if (num_slots > mgr->avail_slots)
2324
		return -ENOSPC;
2325
 
2326
	vcpi->pbn = pbn;
2327
	vcpi->aligned_pbn = num_slots * mgr->pbn_div;
2328
	vcpi->num_slots = num_slots;
2329
 
2330
	ret = drm_dp_mst_assign_payload_id(mgr, vcpi);
2331
	if (ret < 0)
2332
		return ret;
2333
	return 0;
2334
}
2335
 
2336
/**
2337
 * drm_dp_mst_allocate_vcpi() - Allocate a virtual channel
2338
 * @mgr: manager for this port
2339
 * @port: port to allocate a virtual channel for.
2340
 * @pbn: payload bandwidth number to request
2341
 * @slots: returned number of slots for this PBN.
2342
 */
2343
bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, int pbn, int *slots)
2344
{
2345
	int ret;
2346
 
2347
	port = drm_dp_get_validated_port_ref(mgr, port);
2348
	if (!port)
2349
		return false;
2350
 
2351
	if (port->vcpi.vcpi > 0) {
2352
		DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", port->vcpi.vcpi, port->vcpi.pbn, pbn);
2353
		if (pbn == port->vcpi.pbn) {
2354
			*slots = port->vcpi.num_slots;
2355
			return true;
2356
		}
2357
	}
2358
 
2359
	ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn);
2360
	if (ret) {
2361
		DRM_DEBUG_KMS("failed to init vcpi %d %d %d\n", DIV_ROUND_UP(pbn, mgr->pbn_div), mgr->avail_slots, ret);
2362
		goto out;
2363
	}
2364
	DRM_DEBUG_KMS("initing vcpi for %d %d\n", pbn, port->vcpi.num_slots);
2365
	*slots = port->vcpi.num_slots;
2366
 
2367
	drm_dp_put_port(port);
2368
	return true;
2369
out:
2370
	return false;
2371
}
2372
EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi);
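
/* Example only (not built): the usual PBN -> slots -> VCPI flow a driver
 * would run while setting up a mode on an MST port.  Error handling is
 * trimmed and the surrounding function is hypothetical.
 */
#if 0 /* illustrative sketch, not compiled */
static int example_enable_stream(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port,
				 int clock_khz, int bpp)
{
	int pbn, slots;

	pbn = drm_dp_calc_pbn_mode(clock_khz, bpp);

	slots = drm_dp_find_vcpi_slots(mgr, pbn);
	if (slots < 0)
		return slots;

	if (!drm_dp_mst_allocate_vcpi(mgr, port, pbn, &slots))
		return -EINVAL;

	/* slots now tells the driver how many time slots to program into its
	 * own transport hardware before running the payload update parts */
	return slots;
}
#endif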
2373
 
6084 serge 2374
int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
2375
{
2376
	int slots = 0;
2377
	port = drm_dp_get_validated_port_ref(mgr, port);
2378
	if (!port)
2379
		return slots;
2380
 
2381
	slots = port->vcpi.num_slots;
2382
	drm_dp_put_port(port);
2383
	return slots;
2384
}
2385
EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots);
2386
 
5060 serge 2387
/**
2388
 * drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI
2389
 * @mgr: manager for this port
2390
 * @port: unverified pointer to a port.
2391
 *
2392
 * This just resets the number of slots for the port's VCPI for later programming.
2393
 */
2394
void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
2395
{
2396
	port = drm_dp_get_validated_port_ref(mgr, port);
2397
	if (!port)
2398
		return;
2399
	port->vcpi.num_slots = 0;
2400
	drm_dp_put_port(port);
2401
}
2402
EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots);
2403
 
2404
/**
2405
 * drm_dp_mst_deallocate_vcpi() - deallocate a VCPI
2406
 * @mgr: manager for this port
2407
 * @port: unverified port to deallocate vcpi for
2408
 */
2409
void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
2410
{
2411
	port = drm_dp_get_validated_port_ref(mgr, port);
2412
	if (!port)
2413
		return;
2414
 
2415
	drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
2416
	port->vcpi.num_slots = 0;
2417
	port->vcpi.pbn = 0;
2418
	port->vcpi.aligned_pbn = 0;
2419
	port->vcpi.vcpi = 0;
2420
	drm_dp_put_port(port);
2421
}
2422
EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi);
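
/* Example only (not built): the matching teardown path.  When a stream is
 * disabled the driver first zeroes the slot count, runs the payload update
 * parts to shrink the table, and only then releases the VCPI.  The
 * surrounding function is hypothetical.
 */
#if 0 /* illustrative sketch, not compiled */
static void example_disable_stream(struct drm_dp_mst_topology_mgr *mgr,
				   struct drm_dp_mst_port *port)
{
	drm_dp_mst_reset_vcpi_slots(mgr, port);
	drm_dp_update_payload_part1(mgr);

	/* ... the driver disables its transport, waits for ACT, then: */
	drm_dp_check_act_status(mgr);
	drm_dp_update_payload_part2(mgr);

	drm_dp_mst_deallocate_vcpi(mgr, port);
}
#endif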
2423
 
2424
static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
2425
				     int id, struct drm_dp_payload *payload)
2426
{
2427
	u8 payload_alloc[3], status;
2428
	int ret;
2429
	int retries = 0;
2430
 
2431
	drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
2432
			   DP_PAYLOAD_TABLE_UPDATED);
2433
 
2434
	payload_alloc[0] = id;
2435
	payload_alloc[1] = payload->start_slot;
2436
	payload_alloc[2] = payload->num_slots;
2437
 
2438
	ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
2439
	if (ret != 3) {
2440
		DRM_DEBUG_KMS("failed to write payload allocation %d\n", ret);
2441
		goto fail;
2442
	}
2443
 
2444
retry:
2445
	ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
2446
	if (ret < 0) {
2447
		DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
2448
		goto fail;
2449
	}
2450
 
2451
	if (!(status & DP_PAYLOAD_TABLE_UPDATED)) {
2452
		retries++;
2453
		if (retries < 20) {
2454
			usleep_range(10000, 20000);
2455
			goto retry;
2456
		}
2457
		DRM_DEBUG_KMS("status not set after read payload table status %d\n", status);
2458
		ret = -EINVAL;
2459
		goto fail;
2460
	}
2461
	ret = 0;
2462
fail:
2463
	return ret;
2464
}
2465
 
2466
 
2467
/**
2468
 * drm_dp_check_act_status() - Check ACT handled status.
2469
 * @mgr: manager to use
2470
 *
2471
 * Check the payload status bits in the DPCD for ACT handled completion.
2472
 */
2473
int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
2474
{
2475
	u8 status;
2476
	int ret;
2477
	int count = 0;
2478
 
2479
	do {
2480
		ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
2481
 
2482
		if (ret < 0) {
2483
			DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
2484
			goto fail;
2485
		}
2486
 
2487
		if (status & DP_PAYLOAD_ACT_HANDLED)
2488
			break;
2489
		count++;
2490
		udelay(100);
2491
 
2492
	} while (count < 30);
2493
 
2494
	if (!(status & DP_PAYLOAD_ACT_HANDLED)) {
2495
		DRM_DEBUG_KMS("failed to get ACT bit %d after %d retries\n", status, count);
2496
		ret = -EINVAL;
2497
		goto fail;
2498
	}
2499
	return 0;
2500
fail:
2501
	return ret;
2502
}
2503
EXPORT_SYMBOL(drm_dp_check_act_status);
2504
 
2505
/**
2506
 * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
2507
 * @clock: dot clock for the mode
2508
 * @bpp: bpp for the mode.
2509
 *
2510
 * This uses the formula in the spec to calculate the PBN value for a mode.
2511
 */
2512
int drm_dp_calc_pbn_mode(int clock, int bpp)
2513
{
2514
	fixed20_12 pix_bw;
2515
	fixed20_12 fbpp;
2516
	fixed20_12 result;
2517
	fixed20_12 margin, tmp;
2518
	u32 res;
2519
 
2520
	pix_bw.full = dfixed_const(clock);
2521
	fbpp.full = dfixed_const(bpp);
2522
	tmp.full = dfixed_const(8);
2523
	fbpp.full = dfixed_div(fbpp, tmp);
2524
 
2525
	result.full = dfixed_mul(pix_bw, fbpp);
2526
	margin.full = dfixed_const(54);
2527
	tmp.full = dfixed_const(64);
2528
	margin.full = dfixed_div(margin, tmp);
2529
	result.full = dfixed_div(result, margin);
2530
 
2531
	margin.full = dfixed_const(1006);
2532
	tmp.full = dfixed_const(1000);
2533
	margin.full = dfixed_div(margin, tmp);
2534
	result.full = dfixed_mul(result, margin);
2535
 
2536
	result.full = dfixed_div(result, tmp);
2537
	result.full = dfixed_ceil(result);
2538
	res = dfixed_trunc(result);
2539
	return res;
2540
}
2541
EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
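
/* Worked example of the fixed-point maths above (informal): with the 64/54
 * PBN granularity and the 0.6% margin factor this amounts to
 *
 *   PBN = ceil(clock_kHz * (bpp / 8) * (64 / 54) * 1.006 / 1000)
 *
 * so for a 154000 kHz, 30 bpp mode:
 *   154000 * 3.75 = 577500  ->  * 64/54 ~= 684444  ->  * 1.006 ~= 688551
 *   ->  / 1000 and round up = 689 PBN,
 * which matches the first check in test_calc_pbn_mode() below.
 */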
2542
 
2543
static int test_calc_pbn_mode(void)
2544
{
2545
	int ret;
2546
	ret = drm_dp_calc_pbn_mode(154000, 30);
2547
	if (ret != 689)
2548
		return -EINVAL;
2549
	ret = drm_dp_calc_pbn_mode(234000, 30);
2550
	if (ret != 1047)
2551
		return -EINVAL;
2552
	return 0;
2553
}
2554
 
2555
/* we want to kick the TX after we've acked the up/down IRQs. */
2556
static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr)
2557
{
2558
//   queue_work(system_long_wq, &mgr->tx_work);
2559
}
2560
 
2561
static void drm_dp_mst_dump_mstb(struct seq_file *m,
2562
				 struct drm_dp_mst_branch *mstb)
2563
{
2564
	struct drm_dp_mst_port *port;
2565
	int tabs = mstb->lct;
2566
	char prefix[10];
2567
	int i;
2568
 
2569
	for (i = 0; i < tabs; i++)
2570
		prefix[i] = '\t';
2571
	prefix[i] = '\0';
2572
 
2573
//   seq_printf(m, "%smst: %p, %d\n", prefix, mstb, mstb->num_ports);
2574
//   list_for_each_entry(port, &mstb->ports, next) {
2575
//       seq_printf(m, "%sport: %d: ddps: %d ldps: %d, %p, conn: %p\n", prefix, port->port_num, port->ddps, port->ldps, port, port->connector);
2576
//       if (port->mstb)
2577
//           drm_dp_mst_dump_mstb(m, port->mstb);
2578
//   }
2579
}
2580
 
2581
static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
2582
				  char *buf)
2583
{
2584
	int ret;
2585
	int i;
2586
	for (i = 0; i < 4; i++) {
2587
		ret = drm_dp_dpcd_read(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS + (i * 16), &buf[i * 16], 16);
2588
		if (ret != 16)
2589
			break;
2590
	}
2591
	if (i == 4)
2592
		return true;
2593
	return false;
2594
}
2595
 
2596
/**
2597
 * drm_dp_mst_dump_topology() - dump topology to seq file.
2598
 * @m: seq_file to dump output to
2599
 * @mgr: manager to dump current topology for.
2600
 *
2601
 * helper to dump MST topology to a seq file for debugfs.
2602
 */
2603
void drm_dp_mst_dump_topology(struct seq_file *m,
2604
			      struct drm_dp_mst_topology_mgr *mgr)
2605
{
2606
	int i;
2607
	struct drm_dp_mst_port *port;
2608
	mutex_lock(&mgr->lock);
2609
	if (mgr->mst_primary)
2610
		drm_dp_mst_dump_mstb(m, mgr->mst_primary);
2611
 
2612
	/* dump VCPIs */
2613
	mutex_unlock(&mgr->lock);
2614
 
2615
 
2616
 
2617
}
2618
EXPORT_SYMBOL(drm_dp_mst_dump_topology);
2619
 
2620
static void drm_dp_tx_work(struct work_struct *work)
2621
{
2622
	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
2623
 
2624
	mutex_lock(&mgr->qlock);
2625
	if (mgr->tx_down_in_progress)
2626
		process_single_down_tx_qlock(mgr);
2627
	mutex_unlock(&mgr->qlock);
2628
}
2629
 
2630
/**
2631
 * drm_dp_mst_topology_mgr_init() - initialise a topology manager
2632
 * @mgr: manager struct to initialise
2633
 * @dev: device providing this structure - for i2c addition.
2634
 * @aux: DP helper aux channel to talk to this device
2635
 * @max_dpcd_transaction_bytes: hw specific DPCD transaction limit
2636
 * @max_payloads: maximum number of payloads this GPU can source
2637
 * @conn_base_id: the connector object ID the MST device is connected to.
2638
 *
2639
 * Returns 0 for success, or a negative error code on failure.
2640
 */
2641
int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
2642
				 struct device *dev, struct drm_dp_aux *aux,
2643
				 int max_dpcd_transaction_bytes,
2644
				 int max_payloads, int conn_base_id)
2645
{
2646
	mutex_init(&mgr->lock);
2647
	mutex_init(&mgr->qlock);
2648
	mutex_init(&mgr->payload_lock);
6084 serge 2649
	mutex_init(&mgr->destroy_connector_lock);
5060 serge 2650
	INIT_LIST_HEAD(&mgr->tx_msg_upq);
2651
	INIT_LIST_HEAD(&mgr->tx_msg_downq);
6084 serge 2652
	INIT_LIST_HEAD(&mgr->destroy_connector_list);
5060 serge 2653
	INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
2654
	INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
6088 serge 2655
	init_waitqueue_head(&mgr->tx_waitq);
5060 serge 2656
	mgr->dev = dev;
2657
	mgr->aux = aux;
2658
	mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
2659
	mgr->max_payloads = max_payloads;
2660
	mgr->conn_base_id = conn_base_id;
2661
	mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL);
2662
	if (!mgr->payloads)
2663
		return -ENOMEM;
2664
	mgr->proposed_vcpis = kcalloc(max_payloads, sizeof(struct drm_dp_vcpi *), GFP_KERNEL);
2665
	if (!mgr->proposed_vcpis)
2666
		return -ENOMEM;
2667
	set_bit(0, &mgr->payload_mask);
2668
	test_calc_pbn_mode();
2669
	return 0;
2670
}
2671
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
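
/* Example only (not built): a minimal init/teardown sketch.  The transaction
 * size and payload count are illustrative values, not requirements; a real
 * driver picks them from its own hardware limits.
 */
#if 0 /* illustrative sketch, not compiled */
static int example_mst_init(struct drm_dp_mst_topology_mgr *mgr,
			    struct device *dev, struct drm_dp_aux *aux,
			    int conn_base_id)
{
	/* 16-byte DPCD transactions, up to 3 simultaneous payloads */
	return drm_dp_mst_topology_mgr_init(mgr, dev, aux, 16, 3, conn_base_id);
}

static void example_mst_fini(struct drm_dp_mst_topology_mgr *mgr)
{
	drm_dp_mst_topology_mgr_destroy(mgr);
}
#endif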
2672
 
2673
/**
2674
 * drm_dp_mst_topology_mgr_destroy() - destroy topology manager.
2675
 * @mgr: manager to destroy
2676
 */
2677
void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
2678
{
2679
	mutex_lock(&mgr->payload_lock);
2680
	kfree(mgr->payloads);
2681
	mgr->payloads = NULL;
2682
	kfree(mgr->proposed_vcpis);
2683
	mgr->proposed_vcpis = NULL;
2684
	mutex_unlock(&mgr->payload_lock);
2685
	mgr->dev = NULL;
2686
	mgr->aux = NULL;
2687
}
2688
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);
2689
 
2690
/* I2C device */
2691
static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
2692
			       int num)
2693
{
2694
	struct drm_dp_aux *aux = adapter->algo_data;
2695
	struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port, aux);
2696
	struct drm_dp_mst_branch *mstb;
2697
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
2698
	unsigned int i;
2699
	bool reading = false;
2700
	struct drm_dp_sideband_msg_req_body msg;
2701
	struct drm_dp_sideband_msg_tx *txmsg = NULL;
2702
	int ret;
2703
 
2704
	mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
2705
	if (!mstb)
2706
		return -EREMOTEIO;
2707
 
2708
	/* construct i2c msg */
2709
	/* see if last msg is a read */
2710
	if (msgs[num - 1].flags & I2C_M_RD)
2711
		reading = true;
2712
 
6084 serge 2713
	if (!reading || (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)) {
5060 serge 2714
		DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n");
2715
		ret = -EIO;
2716
		goto out;
2717
	}
2718
 
6084 serge 2719
	memset(&msg, 0, sizeof(msg));
5060 serge 2720
	msg.req_type = DP_REMOTE_I2C_READ;
2721
	msg.u.i2c_read.num_transactions = num - 1;
2722
	msg.u.i2c_read.port_number = port->port_num;
2723
	for (i = 0; i < num - 1; i++) {
2724
		msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr;
2725
		msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len;
2726
		msg.u.i2c_read.transactions[i].bytes = msgs[i].buf;
2727
	}
2728
	msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr;
2729
	msg.u.i2c_read.num_bytes_read = msgs[num - 1].len;
2730
 
2731
	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2732
	if (!txmsg) {
2733
		ret = -ENOMEM;
2734
		goto out;
2735
	}
2736
 
2737
	txmsg->dst = mstb;
2738
	drm_dp_encode_sideband_req(&msg, txmsg);
2739
 
2740
	drm_dp_queue_down_tx(mgr, txmsg);
2741
 
2742
	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
2743
	if (ret > 0) {
2744
 
2745
		if (txmsg->reply.reply_type == 1) { /* got a NAK back */
2746
			ret = -EREMOTEIO;
2747
			goto out;
2748
		}
2749
		if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) {
2750
			ret = -EIO;
2751
			goto out;
2752
		}
2753
		memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes, msgs[num - 1].len);
2754
		ret = num;
2755
	}
2756
out:
2757
	kfree(txmsg);
2758
	drm_dp_put_mst_branch_device(mstb);
2759
	return ret;
2760
}
2761
 
2762
static u32 drm_dp_mst_i2c_functionality(struct i2c_adapter *adapter)
2763
{
2764
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
2765
	       I2C_FUNC_SMBUS_READ_BLOCK_DATA |
2766
	       I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
2767
	       I2C_FUNC_10BIT_ADDR;
2768
}
2769
 
2770
static const struct i2c_algorithm drm_dp_mst_i2c_algo = {
2771
	.functionality = drm_dp_mst_i2c_functionality,
2772
	.master_xfer = drm_dp_mst_i2c_xfer,
2773
};
2774
 
2775
/**
2776
 * drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX
2777
 * @aux: DisplayPort AUX channel
2778
 *
2779
 * Returns 0 on success or a negative error code on failure.
2780
 */
2781
static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux)
2782
{
2783
	aux->ddc.algo = &drm_dp_mst_i2c_algo;
2784
	aux->ddc.algo_data = aux;
2785
	aux->ddc.retries = 3;
2786
 
2787
	aux->ddc.class = I2C_CLASS_DDC;
2788
	aux->ddc.owner = THIS_MODULE;
2789
	aux->ddc.dev.parent = aux->dev;
2790
 
2791
	return i2c_add_adapter(&aux->ddc);
2792
}
2793
 
2794
/**
2795
 * drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter
2796
 * @aux: DisplayPort AUX channel
2797
 */
2798
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux)
2799
{
2800
	i2c_del_adapter(&aux->ddc);
2801
}