Subversion Repositories Kolibri OS

Rev 6935 → Rev 6937
Line 26... Line 26...
  */
Line 27... Line 27...
 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
 #include 
 #include 
Line 385... Line 386...
 
 	/*
 	 * We don't have power sequencer currently.
 	 * Pick one that's not used by other ports.
 	 */
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
-			    base.head) {
+	for_each_intel_encoder(dev, encoder) {
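The r6937 side replaces the open-coded walk over dev->mode_config.encoder_list with the driver's for_each_intel_encoder() helper, here and in the other encoder loops below. As a hedged sketch (the exact definition lives in the i915 headers and may differ slightly in this tree), the helper is just a wrapper that hides the list head and the link member:

    /* sketch: iterate all intel_encoders registered on the device */
    #define for_each_intel_encoder(dev, intel_encoder) \
            list_for_each_entry(intel_encoder, \
                                &(dev)->mode_config.encoder_list, \
                                base.head)

Callers keep the same loop body; only the iteration boilerplate changes.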
Line 392... Line 392...
 		struct intel_dp *tmp;
 
Line 513... Line 513...
 void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
 {
 	struct drm_device *dev = dev_priv->dev;
 	struct intel_encoder *encoder;
Line 517... Line 517...
 
-	if (WARN_ON(!IS_VALLEYVIEW(dev)))
+	if (WARN_ON(!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)))
Line 519... Line 519...
 		return;
 
 	/*
Line 526... Line 526...
 	 * reference get/put must be done while _not_ holding pps_mutex.
 	 * pps_{lock,unlock}() do these steps in the correct order, so one
 	 * should use them always.
 	 */
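The comment above leans on pps_{lock,unlock}(). Roughly, those helpers take a display power domain reference before acquiring pps_mutex and drop it only after releasing the mutex, which is exactly the ordering the comment asks for. A simplified sketch, assuming the helpers exist as in the upstream driver (the real ones look up the encoder's actual power domain rather than the illustrative one used here):

    static void pps_lock(struct intel_dp *intel_dp)
    {
            struct drm_i915_private *dev_priv =
                    intel_dp_to_dev(intel_dp)->dev_private;

            /* take the power reference first, while *not* holding pps_mutex */
            intel_display_power_get(dev_priv, POWER_DOMAIN_AUX_A /* illustrative */);
            mutex_lock(&dev_priv->pps_mutex);
    }

    static void pps_unlock(struct intel_dp *intel_dp)
    {
            struct drm_i915_private *dev_priv =
                    intel_dp_to_dev(intel_dp)->dev_private;

            mutex_unlock(&dev_priv->pps_mutex);
            /* drop the reference only after pps_mutex is released */
            intel_display_power_put(dev_priv, POWER_DOMAIN_AUX_A /* illustrative */);
    }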
Line 530... Line 530...
 
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
+	for_each_intel_encoder(dev, encoder) {
Line 532... Line 532...
 		struct intel_dp *intel_dp;
 
Line 534... Line 534...
 		if (encoder->type != INTEL_OUTPUT_EDP)
 			continue;
 
 		intel_dp = enc_to_intel_dp(&encoder->base);
Line -... Line 538...
 		intel_dp->pps_pipe = INVALID_PIPE;
 	}
 }
Line 541... Line 542...
 
-static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
+static i915_reg_t
+_pp_ctrl_reg(struct intel_dp *intel_dp)
Line 549... Line 550...
 		return PCH_PP_CONTROL;
 	else
 		return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
 }
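Most of the mechanical churn in r6937 is the move from plain u32 register offsets to the i915_reg_t handle type, as in the _pp_ctrl_reg() change above and the PPS, AUX and signal-level register variables further down. Schematically, assuming the definition matches the upstream i915_reg.h this port follows, the type simply wraps the MMIO offset so that stray arithmetic on register addresses stops compiling:

    typedef struct {
            uint32_t reg;                        /* raw MMIO offset */
    } i915_reg_t;

    #define _MMIO(r) ((const i915_reg_t){ .reg = (r) })

    static inline uint32_t i915_mmio_reg_offset(i915_reg_t reg)
    {
            return reg.reg;                      /* e.g. for the debug print in the readout hunk below */
    }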
Line -... Line 554...
 
-static u32 _pp_stat_reg(struct intel_dp *intel_dp)
+static i915_reg_t
+_pp_stat_reg(struct intel_dp *intel_dp)
 {
Line 556... Line 558...
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 
Line 577... Line 579...
577
	if (!is_edp(intel_dp) || code != SYS_RESTART)
579
	if (!is_edp(intel_dp) || code != SYS_RESTART)
578
		return 0;
580
		return 0;
Line 579... Line 581...
579
 
581
 
Line 580... Line 582...
580
	pps_lock(intel_dp);
582
	pps_lock(intel_dp);
581
 
583
 
582
	if (IS_VALLEYVIEW(dev)) {
584
	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
583
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
585
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
Line 584... Line 586...
584
		u32 pp_ctrl_reg, pp_div_reg;
586
		i915_reg_t pp_ctrl_reg, pp_div_reg;
585
		u32 pp_div;
587
		u32 pp_div;
586
 
588
 
Line 606... Line 608...
606
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
608
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
607
	struct drm_i915_private *dev_priv = dev->dev_private;
609
	struct drm_i915_private *dev_priv = dev->dev_private;
Line 608... Line 610...
608
 
610
 
Line 609... Line 611...
609
	lockdep_assert_held(&dev_priv->pps_mutex);
611
	lockdep_assert_held(&dev_priv->pps_mutex);
610
 
612
 
611
	if (IS_VALLEYVIEW(dev) &&
613
	if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
Line 612... Line 614...
612
	    intel_dp->pps_pipe == INVALID_PIPE)
614
	    intel_dp->pps_pipe == INVALID_PIPE)
613
		return false;
615
		return false;
Line 620... Line 622...
620
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
622
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
621
	struct drm_i915_private *dev_priv = dev->dev_private;
623
	struct drm_i915_private *dev_priv = dev->dev_private;
Line 622... Line 624...
622
 
624
 
Line 623... Line 625...
623
	lockdep_assert_held(&dev_priv->pps_mutex);
625
	lockdep_assert_held(&dev_priv->pps_mutex);
624
 
626
 
625
	if (IS_VALLEYVIEW(dev) &&
627
	if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
Line 626... Line 628...
626
	    intel_dp->pps_pipe == INVALID_PIPE)
628
	    intel_dp->pps_pipe == INVALID_PIPE)
627
		return false;
629
		return false;
Line 650... Line 652...
650
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
652
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
651
{
653
{
652
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
654
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
653
	struct drm_device *dev = intel_dig_port->base.base.dev;
655
	struct drm_device *dev = intel_dig_port->base.base.dev;
654
	struct drm_i915_private *dev_priv = dev->dev_private;
656
	struct drm_i915_private *dev_priv = dev->dev_private;
655
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
657
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
656
	uint32_t status;
658
	uint32_t status;
657
	bool done;
659
	bool done;
Line 658... Line 660...
658
 
660
 
659
#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
661
#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
Line 677... Line 679...
 
 	/*
 	 * The clock divider is based off the hrawclk, and would like to run at
 	 * 2MHz.  So, take the hrawclk value and divide by 2 and use that
 	 */
-	return index ? 0 : intel_hrawclk(dev) / 2;
+	return index ? 0 : DIV_ROUND_CLOSEST(intel_hrawclk(dev), 2);
Line 683... Line 685...
683
}
685
}
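Several AUX clock-divider computations in this revision switch from truncating division or DIV_ROUND_UP to DIV_ROUND_CLOSEST, as in the hrawclk/2 change above and the cdclk/rawclk dividers below. With the usual kernel definitions (simplified here to non-negative operands), the result only differs when the remainder is less than half the divisor; a standalone illustration with made-up clock values:

    #include <stdio.h>

    /* simplified forms of the kernel macros, valid for non-negative operands */
    #define DIV_ROUND_UP(n, d)       (((n) + (d) - 1) / (d))
    #define DIV_ROUND_CLOSEST(n, d)  (((n) + (d) / 2) / (d))

    int main(void)
    {
            unsigned int hrawclk = 125;   /* illustrative value */

            printf("truncated: %u\n", hrawclk / 2);                    /* 62 */
            printf("closest:   %u\n", DIV_ROUND_CLOSEST(hrawclk, 2));  /* 63 */
            printf("up: %u  closest: %u\n",
                   DIV_ROUND_UP(450500, 2000),                         /* 226 */
                   DIV_ROUND_CLOSEST(450500, 2000));                   /* 225 */
            return 0;
    }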
684
 
686
 
685
static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
687
static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
Line 690... Line 692...
 
 	if (index)
Line 692... Line 694...
 		return 0;
 
Line 694... Line 696...
 	if (intel_dig_port->port == PORT_A) {
-		return DIV_ROUND_UP(dev_priv->cdclk_freq, 2000);
+		return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
 
 	} else {
Line 698... Line 700...
-		return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
+		return DIV_ROUND_CLOSEST(intel_pch_rawclk(dev), 2);
 	}
Line 707... Line 709...
707
 
709
 
708
	if (intel_dig_port->port == PORT_A) {
710
	if (intel_dig_port->port == PORT_A) {
709
		if (index)
711
		if (index)
710
			return 0;
712
			return 0;
711
		return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
713
		return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
712
	} else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
714
	} else if (HAS_PCH_LPT_H(dev_priv)) {
713
		/* Workaround for non-ULT HSW */
715
		/* Workaround for non-ULT HSW */
714
		switch (index) {
716
		switch (index) {
715
		case 0: return 63;
717
		case 0: return 63;
716
		case 1: return 72;
718
		case 1: return 72;
717
		default: return 0;
719
		default: return 0;
718
		}
720
		}
719
	} else  {
721
	} else  {
720
		return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
722
		return index ? 0 : DIV_ROUND_CLOSEST(intel_pch_rawclk(dev), 2);
721
	}
723
	}
Line 722... Line 724...
722
}
724
}
723
 
725
 
Line 748... Line 750...
748
	if (IS_GEN6(dev))
750
	if (IS_GEN6(dev))
749
		precharge = 3;
751
		precharge = 3;
750
	else
752
	else
751
		precharge = 5;
753
		precharge = 5;
Line 752... Line 754...
752
 
754
 
753
	if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
755
	if (IS_BROADWELL(dev) && intel_dig_port->port == PORT_A)
754
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
756
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
755
	else
757
	else
Line 756... Line 758...
756
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
758
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
Line 787... Line 789...
787
		uint8_t *recv, int recv_size)
789
		uint8_t *recv, int recv_size)
788
{
790
{
789
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
791
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
790
	struct drm_device *dev = intel_dig_port->base.base.dev;
792
	struct drm_device *dev = intel_dig_port->base.base.dev;
791
	struct drm_i915_private *dev_priv = dev->dev_private;
793
	struct drm_i915_private *dev_priv = dev->dev_private;
792
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
794
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
793
	uint32_t ch_data = ch_ctl + 4;
-
 
794
	uint32_t aux_clock_divider;
795
	uint32_t aux_clock_divider;
795
	int i, ret, recv_bytes;
796
	int i, ret, recv_bytes;
796
	uint32_t status;
797
	uint32_t status;
797
	int try, clock = 0;
798
	int try, clock = 0;
798
	bool has_aux_irq = HAS_AUX_IRQ(dev);
799
	bool has_aux_irq = HAS_AUX_IRQ(dev);
Line 851... Line 852...
851
 
852
 
852
		/* Must try at least 3 times according to DP spec */
853
		/* Must try at least 3 times according to DP spec */
853
		for (try = 0; try < 5; try++) {
854
		for (try = 0; try < 5; try++) {
854
			/* Load the send data into the aux channel data registers */
855
			/* Load the send data into the aux channel data registers */
855
			for (i = 0; i < send_bytes; i += 4)
856
			for (i = 0; i < send_bytes; i += 4)
856
				I915_WRITE(ch_data + i,
857
				I915_WRITE(intel_dp->aux_ch_data_reg[i >> 2],
857
					   intel_dp_pack_aux(send + i,
858
					   intel_dp_pack_aux(send + i,
Line 858... Line 859...
858
							     send_bytes - i));
859
							     send_bytes - i));
859
 
860
 
Line 911... Line 912...
911
	}
912
	}
Line 912... Line 913...
912
 
913
 
913
	/* Unload any bytes sent back from the other side */
914
	/* Unload any bytes sent back from the other side */
914
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
915
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
-
 
916
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
-
 
917
 
-
 
918
	/*
-
 
919
	 * By BSpec: "Message sizes of 0 or >20 are not allowed."
-
 
920
	 * We have no idea of what happened so we return -EBUSY so
-
 
921
	 * drm layer takes care for the necessary retries.
-
 
922
	 */
-
 
923
	if (recv_bytes == 0 || recv_bytes > 20) {
-
 
924
		DRM_DEBUG_KMS("Forbidden recv_bytes = %d on aux transaction\n",
-
 
925
			      recv_bytes);
-
 
926
		/*
-
 
927
		 * FIXME: This patch was created on top of a series that
-
 
928
		 * organize the retries at drm level. There EBUSY should
-
 
929
		 * also take care for 1ms wait before retrying.
-
 
930
		 * That aux retries re-org is still needed and after that is
-
 
931
		 * merged we remove this sleep from here.
-
 
932
		 */
-
 
933
		usleep_range(1000, 1500);
-
 
934
		ret = -EBUSY;
-
 
935
		goto out;
-
 
936
	}
915
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
937
 
916
	if (recv_bytes > recv_size)
938
	if (recv_bytes > recv_size)
Line 917... Line 939...
917
		recv_bytes = recv_size;
939
		recv_bytes = recv_size;
918
 
940
 
919
	for (i = 0; i < recv_bytes; i += 4)
941
	for (i = 0; i < recv_bytes; i += 4)
Line 920... Line 942...
920
		intel_dp_unpack_aux(I915_READ(ch_data + i),
942
		intel_dp_unpack_aux(I915_READ(intel_dp->aux_ch_data_reg[i >> 2]),
921
				    recv + i, recv_bytes - i);
943
				    recv + i, recv_bytes - i);
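The send and receive loops above move four bytes per 32-bit data register (hence the i += 4 stride and the aux_ch_data_reg[i >> 2] lookup on the r6937 side). The pack/unpack helpers they call put the first message byte into the most significant byte of the word; a standalone sketch written to mirror what intel_dp_pack_aux()/intel_dp_unpack_aux() appear to do (treat the byte ordering here as an assumption):

    #include <stdint.h>
    #include <stdio.h>

    /* pack up to 4 message bytes into one register word, first byte in the MSB */
    static uint32_t pack_aux(const uint8_t *src, int src_bytes)
    {
            uint32_t v = 0;
            int i;

            if (src_bytes > 4)
                    src_bytes = 4;
            for (i = 0; i < src_bytes; i++)
                    v |= (uint32_t)src[i] << ((3 - i) * 8);
            return v;
    }

    static void unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
    {
            int i;

            if (dst_bytes > 4)
                    dst_bytes = 4;
            for (i = 0; i < dst_bytes; i++)
                    dst[i] = src >> ((3 - i) * 8);
    }

    int main(void)
    {
            uint8_t msg[3] = { 0x10, 0x20, 0x30 }, out[3] = { 0 };

            unpack_aux(pack_aux(msg, 3), out, 3);
            printf("%02x %02x %02x\n", out[0], out[1], out[2]);   /* 10 20 30 */
            return 0;
    }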
Line 1001... Line 1023...
1001
	}
1023
	}
Line 1002... Line 1024...
1002
 
1024
 
1003
	return ret;
1025
	return ret;
Line 1004... Line -...
1004
}
-
 
1005
 
1026
}
-
 
1027
 
1006
static void
1028
static i915_reg_t g4x_aux_ctl_reg(struct drm_i915_private *dev_priv,
-
 
1029
				       enum port port)
-
 
1030
{
-
 
1031
	switch (port) {
-
 
1032
	case PORT_B:
-
 
1033
	case PORT_C:
-
 
1034
	case PORT_D:
-
 
1035
		return DP_AUX_CH_CTL(port);
1007
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
1036
	default:
-
 
1037
		MISSING_CASE(port);
-
 
1038
		return DP_AUX_CH_CTL(PORT_B);
-
 
1039
	}
1008
{
1040
}
-
 
1041
 
-
 
1042
static i915_reg_t g4x_aux_data_reg(struct drm_i915_private *dev_priv,
-
 
1043
					enum port port, int index)
-
 
1044
{
-
 
1045
	switch (port) {
-
 
1046
	case PORT_B:
-
 
1047
	case PORT_C:
-
 
1048
	case PORT_D:
-
 
1049
		return DP_AUX_CH_DATA(port, index);
-
 
1050
	default:
-
 
1051
		MISSING_CASE(port);
-
 
1052
		return DP_AUX_CH_DATA(PORT_B, index);
-
 
1053
	}
1009
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1054
}
1010
	struct drm_i915_private *dev_priv = dev->dev_private;
1055
 
-
 
1056
static i915_reg_t ilk_aux_ctl_reg(struct drm_i915_private *dev_priv,
-
 
1057
				       enum port port)
-
 
1058
{
-
 
1059
	switch (port) {
-
 
1060
	case PORT_A:
-
 
1061
		return DP_AUX_CH_CTL(port);
-
 
1062
	case PORT_B:
-
 
1063
	case PORT_C:
-
 
1064
	case PORT_D:
-
 
1065
		return PCH_DP_AUX_CH_CTL(port);
-
 
1066
	default:
-
 
1067
		MISSING_CASE(port);
-
 
1068
		return DP_AUX_CH_CTL(PORT_A);
-
 
1069
	}
1011
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1070
}
1012
	enum port port = intel_dig_port->port;
1071
 
-
 
1072
static i915_reg_t ilk_aux_data_reg(struct drm_i915_private *dev_priv,
-
 
1073
					enum port port, int index)
-
 
1074
{
-
 
1075
	switch (port) {
-
 
1076
	case PORT_A:
-
 
1077
		return DP_AUX_CH_DATA(port, index);
-
 
1078
	case PORT_B:
1013
	struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
1079
	case PORT_C:
1014
	const char *name = NULL;
1080
	case PORT_D:
-
 
1081
		return PCH_DP_AUX_CH_DATA(port, index);
-
 
1082
	default:
-
 
1083
		MISSING_CASE(port);
-
 
1084
		return DP_AUX_CH_DATA(PORT_A, index);
Line -... Line 1085...
-
 
1085
	}
1015
	uint32_t porte_aux_ctl_reg = DPA_AUX_CH_CTL;
1086
}
1016
	int ret;
1087
 
1017
 
1088
/*
-
 
1089
 * On SKL we don't have Aux for port E so we rely
-
 
1090
 * on VBT to set a proper alternate aux channel.
-
 
1091
 */
1018
	/* On SKL we don't have Aux for port E so we rely on VBT to set
1092
static enum port skl_porte_aux_port(struct drm_i915_private *dev_priv)
-
 
1093
{
1019
	 * a proper alternate aux channel.
1094
	const struct ddi_vbt_port_info *info =
-
 
1095
		&dev_priv->vbt.ddi_port_info[PORT_E];
-
 
1096
 
1020
	 */
1097
		switch (info->alternate_aux_channel) {
1021
	if (IS_SKYLAKE(dev) && port == PORT_E) {
-
 
1022
		switch (info->alternate_aux_channel) {
1098
	case DP_AUX_A:
1023
		case DP_AUX_B:
1099
		return PORT_A;
1024
			porte_aux_ctl_reg = DPB_AUX_CH_CTL;
-
 
1025
			break;
1100
		case DP_AUX_B:
1026
		case DP_AUX_C:
1101
		return PORT_B;
1027
			porte_aux_ctl_reg = DPC_AUX_CH_CTL;
-
 
1028
			break;
-
 
1029
		case DP_AUX_D:
1102
		case DP_AUX_C:
1030
			porte_aux_ctl_reg = DPD_AUX_CH_CTL;
1103
		return PORT_C;
1031
			break;
1104
		case DP_AUX_D:
-
 
1105
		return PORT_D;
1032
		case DP_AUX_A:
1106
	default:
1033
		default:
1107
		MISSING_CASE(info->alternate_aux_channel);
Line -... Line 1108...
-
 
1108
		return PORT_A;
-
 
1109
	}
-
 
1110
}
-
 
1111
 
-
 
1112
static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv,
-
 
1113
				       enum port port)
1034
			porte_aux_ctl_reg = DPA_AUX_CH_CTL;
1114
{
1035
		}
1115
	if (port == PORT_E)
1036
	}
-
 
1037
 
-
 
1038
	switch (port) {
-
 
1039
	case PORT_A:
1116
		port = skl_porte_aux_port(dev_priv);
1040
		intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
-
 
1041
		name = "DPDDC-A";
-
 
1042
		break;
-
 
1043
	case PORT_B:
1117
 
1044
		intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
-
 
1045
		name = "DPDDC-B";
-
 
1046
		break;
-
 
1047
	case PORT_C:
1118
	switch (port) {
1048
		intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
1119
	case PORT_A:
1049
		name = "DPDDC-C";
-
 
1050
		break;
-
 
1051
	case PORT_D:
-
 
1052
		intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
-
 
1053
		name = "DPDDC-D";
-
 
1054
		break;
-
 
1055
	case PORT_E:
1120
	case PORT_B:
-
 
1121
	case PORT_C:
-
 
1122
	case PORT_D:
1056
		intel_dp->aux_ch_ctl_reg = porte_aux_ctl_reg;
1123
		return DP_AUX_CH_CTL(port);
1057
		name = "DPDDC-E";
1124
		default:
Line -... Line 1125...
-
 
1125
		MISSING_CASE(port);
-
 
1126
		return DP_AUX_CH_CTL(PORT_A);
-
 
1127
		}
-
 
1128
	}
-
 
1129
 
-
 
1130
static i915_reg_t skl_aux_data_reg(struct drm_i915_private *dev_priv,
-
 
1131
					enum port port, int index)
-
 
1132
{
-
 
1133
	if (port == PORT_E)
-
 
1134
		port = skl_porte_aux_port(dev_priv);
-
 
1135
 
-
 
1136
	switch (port) {
-
 
1137
	case PORT_A:
-
 
1138
	case PORT_B:
-
 
1139
	case PORT_C:
1058
		break;
1140
	case PORT_D:
-
 
1141
		return DP_AUX_CH_DATA(port, index);
-
 
1142
	default:
-
 
1143
		MISSING_CASE(port);
-
 
1144
		return DP_AUX_CH_DATA(PORT_A, index);
-
 
1145
	}
-
 
1146
}
-
 
1147
 
-
 
1148
static i915_reg_t intel_aux_ctl_reg(struct drm_i915_private *dev_priv,
1059
	default:
1149
					 enum port port)
1060
		BUG();
1150
{
1061
	}
1151
	if (INTEL_INFO(dev_priv)->gen >= 9)
-
 
1152
		return skl_aux_ctl_reg(dev_priv, port);
-
 
1153
	else if (HAS_PCH_SPLIT(dev_priv))
1062
 
1154
		return ilk_aux_ctl_reg(dev_priv, port);
-
 
1155
	else
-
 
1156
		return g4x_aux_ctl_reg(dev_priv, port);
-
 
1157
}
-
 
1158
 
-
 
1159
static i915_reg_t intel_aux_data_reg(struct drm_i915_private *dev_priv,
1063
	/*
1160
					  enum port port, int index)
1064
	 * The AUX_CTL register is usually DP_CTL + 0x10.
1161
{
-
 
1162
	if (INTEL_INFO(dev_priv)->gen >= 9)
-
 
1163
		return skl_aux_data_reg(dev_priv, port, index);
-
 
1164
	else if (HAS_PCH_SPLIT(dev_priv))
1065
	 *
1165
		return ilk_aux_data_reg(dev_priv, port, index);
-
 
1166
	else
-
 
1167
		return g4x_aux_data_reg(dev_priv, port, index);
-
 
1168
}
1066
	 * On Haswell and Broadwell though:
1169
 
-
 
1170
static void intel_aux_reg_init(struct intel_dp *intel_dp)
-
 
1171
{
1067
	 *   - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
1172
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
1068
	 *   - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
1173
	enum port port = dp_to_dig_port(intel_dp)->port;
-
 
1174
	int i;
-
 
1175
 
-
 
1176
	intel_dp->aux_ch_ctl_reg = intel_aux_ctl_reg(dev_priv, port);
-
 
1177
	for (i = 0; i < ARRAY_SIZE(intel_dp->aux_ch_data_reg); i++)
-
 
1178
		intel_dp->aux_ch_data_reg[i] = intel_aux_data_reg(dev_priv, port, i);
-
 
1179
}
-
 
1180
 
-
 
1181
static void
-
 
1182
intel_dp_aux_fini(struct intel_dp *intel_dp)
-
 
1183
{
-
 
1184
	drm_dp_aux_unregister(&intel_dp->aux);
-
 
1185
	kfree(intel_dp->aux.name);
-
 
1186
}
-
 
1187
 
-
 
1188
static int
-
 
1189
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
-
 
1190
{
-
 
1191
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
-
 
1192
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-
 
1193
	enum port port = intel_dig_port->port;
-
 
1194
	int ret;
-
 
1195
 
Line 1069... Line -...
1069
	 *
-
 
1070
	 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
1196
	intel_aux_reg_init(intel_dp);
1071
	 */
1197
 
Line 1072... Line 1198...
1072
	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev) && port != PORT_E)
1198
	intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c", port_name(port));
1073
		intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
1199
	if (!intel_dp->aux.name)
-
 
1200
		return -ENOMEM;
Line 1074... Line 1201...
1074
 
1201
 
1075
	intel_dp->aux.name = name;
1202
	intel_dp->aux.dev = dev->dev;
1076
	intel_dp->aux.dev = dev->dev;
1203
	intel_dp->aux.transfer = intel_dp_aux_transfer;
1077
	intel_dp->aux.transfer = intel_dp_aux_transfer;
1204
 
-
 
1205
	DRM_DEBUG_KMS("registering %s bus for %s\n",
1078
 
1206
		      intel_dp->aux.name,
1079
	DRM_DEBUG_KMS("registering %s bus for %s\n", name,
1207
		      connector->base.kdev->kobj.name);
Line 1080... Line 1208...
1080
					"");
1208
 
1081
 
1209
	ret = drm_dp_aux_register(&intel_dp->aux);
1082
	ret = drm_dp_aux_register(&intel_dp->aux);
1210
	if (ret < 0) {
1083
	if (ret < 0) {
1211
		DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
1084
		DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
1212
			  intel_dp->aux.name, ret);
-
 
1213
		kfree(intel_dp->aux.name);
1085
			  name, ret);
1214
		return ret;
-
 
1215
	}
1086
		return;
1216
 
-
 
1217
	ret = sysfs_create_link(&connector->base.kdev->kobj,
-
 
1218
				&intel_dp->aux.ddc.dev.kobj,
1087
	}
1219
				intel_dp->aux.ddc.dev.kobj.name);
Line 1088... Line 1220...
1088
 
1220
	if (ret < 0) {
1089
	ret = sysfs_create_link(&connector->base.kdev->kobj,
1221
		DRM_ERROR("sysfs_create_link() for %s failed (%d)\n",
1090
				&intel_dp->aux.ddc.dev.kobj,
1222
			  intel_dp->aux.name, ret);
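The rewritten AUX setup above replaces the old output_reg + 0x10 arithmetic and the big per-port switch with small per-platform lookup functions, caching the results in intel_dp (aux_ch_ctl_reg plus the aux_ch_data_reg[] array). One reading of why the data registers are now cached as an array: once registers are i915_reg_t handles rather than plain offsets, the old ch_data + i arithmetic in the transfer loop no longer type-checks, even though the layout itself is unchanged — AUX_CTL still sits at DP_CTL + 0x10, with the data registers following it (the transfer loop moves at most 20 bytes, i.e. five words). A standalone sketch of that layout with illustrative offsets:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t dp_ctl  = 0x64000;          /* illustrative port control offset */
            uint32_t aux_ctl = dp_ctl + 0x10;    /* AUX_CTL usually follows DP_CTL by 0x10 */
            int i;

            for (i = 0; i < 5; i++)              /* five 32-bit AUX data registers */
                    printf("AUX_DATA%d at 0x%05x\n", i + 1, aux_ctl + 4 + 4 * i);
            return 0;
    }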
Line 1182... Line 1314...
1182
	*sink_rates = default_rates;
1314
	*sink_rates = default_rates;
Line 1183... Line 1315...
1183
 
1315
 
1184
	return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
1316
	return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
Line 1185... Line 1317...
1185
}
1317
}
1186
 
1318
 
-
 
1319
bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
-
 
1320
{
-
 
1321
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1187
static bool intel_dp_source_supports_hbr2(struct drm_device *dev)
1322
	struct drm_device *dev = dig_port->base.base.dev;
1188
{
1323
 
1189
	/* WaDisableHBR2:skl */
1324
	/* WaDisableHBR2:skl */
Line 1190... Line 1325...
1190
	if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
1325
	if (IS_SKL_REVID(dev, 0, SKL_REVID_B0))
1191
		return false;
1326
		return false;
1192
 
1327
 
1193
	if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
1328
	if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
1194
	    (INTEL_INFO(dev)->gen >= 9))
1329
	    (INTEL_INFO(dev)->gen >= 9))
1195
		return true;
1330
		return true;
Line 1196... Line 1331...
1196
	else
1331
	else
1197
		return false;
1332
		return false;
1198
}
1333
}
-
 
1334
 
-
 
1335
static int
1199
 
1336
intel_dp_source_rates(struct intel_dp *intel_dp, const int **source_rates)
Line 1200... Line 1337...
1200
static int
1337
{
1201
intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
1338
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1202
{
1339
	struct drm_device *dev = dig_port->base.base.dev;
1203
	int size;
1340
	int size;
1204
 
1341
 
1205
	if (IS_BROXTON(dev)) {
1342
	if (IS_BROXTON(dev)) {
1206
		*source_rates = bxt_rates;
1343
		*source_rates = bxt_rates;
1207
		size = ARRAY_SIZE(bxt_rates);
1344
		size = ARRAY_SIZE(bxt_rates);
1208
	} else if (IS_SKYLAKE(dev)) {
1345
	} else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
1209
		*source_rates = skl_rates;
1346
		*source_rates = skl_rates;
Line 1210... Line 1347...
1210
		size = ARRAY_SIZE(skl_rates);
1347
		size = ARRAY_SIZE(skl_rates);
1211
	} else {
1348
	} else {
1212
		*source_rates = default_rates;
1349
		*source_rates = default_rates;
Line 1213... Line 1350...
1213
		size = ARRAY_SIZE(default_rates);
1350
		size = ARRAY_SIZE(default_rates);
1214
	}
1351
	}
Line 1277... Line 1414...
1277
}
1414
}
Line 1278... Line 1415...
1278
 
1415
 
1279
static int intel_dp_common_rates(struct intel_dp *intel_dp,
1416
static int intel_dp_common_rates(struct intel_dp *intel_dp,
1280
				 int *common_rates)
1417
				 int *common_rates)
1281
{
-
 
1282
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1418
{
1283
	const int *source_rates, *sink_rates;
1419
	const int *source_rates, *sink_rates;
Line 1284... Line 1420...
1284
	int source_len, sink_len;
1420
	int source_len, sink_len;
1285
 
1421
 
Line 1286... Line 1422...
1286
	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1422
	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1287
	source_len = intel_dp_source_rates(dev, &source_rates);
1423
	source_len = intel_dp_source_rates(intel_dp, &source_rates);
1288
 
1424
 
1289
	return intersect_rates(source_rates, source_len,
1425
	return intersect_rates(source_rates, source_len,
Line 1307... Line 1443...
1307
	}
1443
	}
1308
}
1444
}
Line 1309... Line 1445...
1309
 
1445
 
1310
static void intel_dp_print_rates(struct intel_dp *intel_dp)
1446
static void intel_dp_print_rates(struct intel_dp *intel_dp)
1311
{
-
 
1312
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1447
{
1313
	const int *source_rates, *sink_rates;
1448
	const int *source_rates, *sink_rates;
1314
	int source_len, sink_len, common_len;
1449
	int source_len, sink_len, common_len;
1315
	int common_rates[DP_MAX_SUPPORTED_RATES];
1450
	int common_rates[DP_MAX_SUPPORTED_RATES];
Line 1316... Line 1451...
1316
	char str[128]; /* FIXME: too big for stack? */
1451
	char str[128]; /* FIXME: too big for stack? */
1317
 
1452
 
Line 1318... Line 1453...
1318
	if ((drm_debug & DRM_UT_KMS) == 0)
1453
	if ((drm_debug & DRM_UT_KMS) == 0)
1319
		return;
1454
		return;
1320
 
1455
 
Line 1321... Line 1456...
1321
	source_len = intel_dp_source_rates(dev, &source_rates);
1456
	source_len = intel_dp_source_rates(intel_dp, &source_rates);
1322
	snprintf_int_array(str, sizeof(str), source_rates, source_len);
1457
	snprintf_int_array(str, sizeof(str), source_rates, source_len);
Line 1358... Line 1493...
1358
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1493
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1359
{
1494
{
1360
	return rate_to_index(rate, intel_dp->sink_rates);
1495
	return rate_to_index(rate, intel_dp->sink_rates);
1361
}
1496
}
Line 1362... Line 1497...
1362
 
1497
 
1363
static void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
1498
void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
1364
				  uint8_t *link_bw, uint8_t *rate_select)
1499
				  uint8_t *link_bw, uint8_t *rate_select)
1365
{
1500
{
1366
	if (intel_dp->num_sink_rates) {
1501
	if (intel_dp->num_sink_rates) {
1367
		*link_bw = 0;
1502
		*link_bw = 0;
Line 1419... Line 1554...
1419
			ret = skl_update_scaler_crtc(pipe_config);
1554
			ret = skl_update_scaler_crtc(pipe_config);
1420
			if (ret)
1555
			if (ret)
1421
				return ret;
1556
				return ret;
1422
		}
1557
		}
Line 1423... Line 1558...
1423
 
1558
 
1424
		if (!HAS_PCH_SPLIT(dev))
1559
		if (HAS_GMCH_DISPLAY(dev))
1425
			intel_gmch_panel_fitting(intel_crtc, pipe_config,
1560
			intel_gmch_panel_fitting(intel_crtc, pipe_config,
1426
						 intel_connector->panel.fitting_mode);
1561
						 intel_connector->panel.fitting_mode);
1427
		else
1562
		else
1428
			intel_pch_panel_fitting(intel_crtc, pipe_config,
1563
			intel_pch_panel_fitting(intel_crtc, pipe_config,
Line 1523... Line 1658...
1523
				intel_connector->panel.downclock_mode->clock,
1658
				intel_connector->panel.downclock_mode->clock,
1524
				pipe_config->port_clock,
1659
				pipe_config->port_clock,
1525
				&pipe_config->dp_m2_n2);
1660
				&pipe_config->dp_m2_n2);
1526
	}
1661
	}
Line 1527... Line 1662...
1527
 
1662
 
1528
	if (IS_SKYLAKE(dev) && is_edp(intel_dp))
1663
	if ((IS_SKYLAKE(dev)  || IS_KABYLAKE(dev)) && is_edp(intel_dp))
1529
		skl_edp_set_pll_config(pipe_config);
1664
		skl_edp_set_pll_config(pipe_config);
1530
	else if (IS_BROXTON(dev))
1665
	else if (IS_BROXTON(dev))
1531
		/* handled in ddi */;
1666
		/* handled in ddi */;
1532
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
1667
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
Line 1535... Line 1670...
1535
		intel_dp_set_clock(encoder, pipe_config);
1670
		intel_dp_set_clock(encoder, pipe_config);
Line 1536... Line 1671...
1536
 
1671
 
1537
	return true;
1672
	return true;
Line 1538... Line -...
1538
}
-
 
1539
 
-
 
1540
static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
-
 
1541
{
-
 
1542
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-
 
1543
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
-
 
1544
	struct drm_device *dev = crtc->base.dev;
-
 
1545
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
1546
	u32 dpa_ctl;
-
 
1547
 
-
 
1548
	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
-
 
1549
		      crtc->config->port_clock);
-
 
1550
	dpa_ctl = I915_READ(DP_A);
-
 
1551
	dpa_ctl &= ~DP_PLL_FREQ_MASK;
-
 
1552
 
-
 
1553
	if (crtc->config->port_clock == 162000) {
-
 
1554
		/* For a long time we've carried around a ILK-DevA w/a for the
-
 
1555
		 * 160MHz clock. If we're really unlucky, it's still required.
-
 
1556
		 */
-
 
1557
		DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
-
 
1558
		dpa_ctl |= DP_PLL_FREQ_160MHZ;
-
 
1559
		intel_dp->DP |= DP_PLL_FREQ_160MHZ;
-
 
1560
	} else {
-
 
1561
		dpa_ctl |= DP_PLL_FREQ_270MHZ;
-
 
1562
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;
-
 
1563
	}
-
 
1564
 
-
 
1565
	I915_WRITE(DP_A, dpa_ctl);
-
 
1566
 
-
 
1567
	POSTING_READ(DP_A);
-
 
1568
	udelay(500);
-
 
1569
}
1673
}
1570
 
1674
 
1571
void intel_dp_set_link_params(struct intel_dp *intel_dp,
1675
void intel_dp_set_link_params(struct intel_dp *intel_dp,
1572
			      const struct intel_crtc_state *pipe_config)
1676
			      const struct intel_crtc_state *pipe_config)
1573
{
1677
{
Line 1610... Line 1714...
1610
 
1714
 
1611
	/* Handle DP bits in common between all three register formats */
1715
	/* Handle DP bits in common between all three register formats */
1612
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
1716
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
Line 1613... Line -...
1613
	intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count);
-
 
1614
 
-
 
1615
	if (crtc->config->has_audio)
-
 
1616
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
1717
	intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count);
Line 1617... Line 1718...
1617
 
1718
 
1618
	/* Split out the IBX/CPU vs CPT settings */
1719
	/* Split out the IBX/CPU vs CPT settings */
1619
 
1720
 
Line 1639... Line 1740...
1639
		else
1740
		else
1640
			trans_dp &= ~TRANS_DP_ENH_FRAMING;
1741
			trans_dp &= ~TRANS_DP_ENH_FRAMING;
1641
		I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
1742
		I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
1642
	} else {
1743
	} else {
1643
		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
1744
		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
1644
		    crtc->config->limited_color_range)
1745
		    !IS_CHERRYVIEW(dev) && crtc->config->limited_color_range)
1645
			intel_dp->DP |= DP_COLOR_RANGE_16_235;
1746
			intel_dp->DP |= DP_COLOR_RANGE_16_235;
Line 1646... Line 1747...
1646
 
1747
 
1647
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1748
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1648
			intel_dp->DP |= DP_SYNC_HS_HIGH;
1749
			intel_dp->DP |= DP_SYNC_HS_HIGH;
Line 1673... Line 1774...
1673
				       u32 mask,
1774
				       u32 mask,
1674
				       u32 value)
1775
				       u32 value)
1675
{
1776
{
1676
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1777
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1677
	struct drm_i915_private *dev_priv = dev->dev_private;
1778
	struct drm_i915_private *dev_priv = dev->dev_private;
1678
	u32 pp_stat_reg, pp_ctrl_reg;
1779
	i915_reg_t pp_stat_reg, pp_ctrl_reg;
Line 1679... Line 1780...
1679
 
1780
 
Line 1680... Line 1781...
1680
	lockdep_assert_held(&dev_priv->pps_mutex);
1781
	lockdep_assert_held(&dev_priv->pps_mutex);
1681
 
1782
 
Line 1763... Line 1864...
1763
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1864
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1764
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
1865
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
1765
	struct drm_i915_private *dev_priv = dev->dev_private;
1866
	struct drm_i915_private *dev_priv = dev->dev_private;
1766
	enum intel_display_power_domain power_domain;
1867
	enum intel_display_power_domain power_domain;
1767
	u32 pp;
1868
	u32 pp;
1768
	u32 pp_stat_reg, pp_ctrl_reg;
1869
	i915_reg_t pp_stat_reg, pp_ctrl_reg;
1769
	bool need_to_disable = !intel_dp->want_panel_vdd;
1870
	bool need_to_disable = !intel_dp->want_panel_vdd;
Line 1770... Line 1871...
1770
 
1871
 
Line 1771... Line 1872...
1771
	lockdep_assert_held(&dev_priv->pps_mutex);
1872
	lockdep_assert_held(&dev_priv->pps_mutex);
1772
 
1873
 
Line 1773... Line 1874...
1773
	if (!is_edp(intel_dp))
1874
	if (!is_edp(intel_dp))
1774
		return false;
1875
		return false;
Line 1775... Line 1876...
1775
 
1876
 
1776
	cancel_delayed_work(&intel_dp->panel_vdd_work);
1877
//	cancel_delayed_work(&intel_dp->panel_vdd_work);
Line 1839... Line 1940...
1839
	struct intel_digital_port *intel_dig_port =
1940
	struct intel_digital_port *intel_dig_port =
1840
		dp_to_dig_port(intel_dp);
1941
		dp_to_dig_port(intel_dp);
1841
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
1942
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
1842
	enum intel_display_power_domain power_domain;
1943
	enum intel_display_power_domain power_domain;
1843
	u32 pp;
1944
	u32 pp;
1844
	u32 pp_stat_reg, pp_ctrl_reg;
1945
	i915_reg_t pp_stat_reg, pp_ctrl_reg;
Line 1845... Line 1946...
1845
 
1946
 
Line 1846... Line 1947...
1846
	lockdep_assert_held(&dev_priv->pps_mutex);
1947
	lockdep_assert_held(&dev_priv->pps_mutex);
Line 1926... Line 2027...
1926
static void edp_panel_on(struct intel_dp *intel_dp)
2027
static void edp_panel_on(struct intel_dp *intel_dp)
1927
{
2028
{
1928
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2029
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1929
	struct drm_i915_private *dev_priv = dev->dev_private;
2030
	struct drm_i915_private *dev_priv = dev->dev_private;
1930
	u32 pp;
2031
	u32 pp;
1931
	u32 pp_ctrl_reg;
2032
	i915_reg_t pp_ctrl_reg;
Line 1932... Line 2033...
1932
 
2033
 
Line 1933... Line 2034...
1933
	lockdep_assert_held(&dev_priv->pps_mutex);
2034
	lockdep_assert_held(&dev_priv->pps_mutex);
1934
 
2035
 
Line 1988... Line 2089...
1988
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
2089
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
1989
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2090
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1990
	struct drm_i915_private *dev_priv = dev->dev_private;
2091
	struct drm_i915_private *dev_priv = dev->dev_private;
1991
	enum intel_display_power_domain power_domain;
2092
	enum intel_display_power_domain power_domain;
1992
	u32 pp;
2093
	u32 pp;
1993
	u32 pp_ctrl_reg;
2094
	i915_reg_t pp_ctrl_reg;
Line 1994... Line 2095...
1994
 
2095
 
Line 1995... Line 2096...
1995
	lockdep_assert_held(&dev_priv->pps_mutex);
2096
	lockdep_assert_held(&dev_priv->pps_mutex);
1996
 
2097
 
Line 2039... Line 2140...
2039
{
2140
{
2040
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2141
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2041
	struct drm_device *dev = intel_dig_port->base.base.dev;
2142
	struct drm_device *dev = intel_dig_port->base.base.dev;
2042
	struct drm_i915_private *dev_priv = dev->dev_private;
2143
	struct drm_i915_private *dev_priv = dev->dev_private;
2043
	u32 pp;
2144
	u32 pp;
2044
	u32 pp_ctrl_reg;
2145
	i915_reg_t pp_ctrl_reg;
Line 2045... Line 2146...
2045
 
2146
 
2046
	/*
2147
	/*
2047
	 * If we enable the backlight right away following a panel power
2148
	 * If we enable the backlight right away following a panel power
2048
	 * on, we may see slight flicker as the panel syncs with the eDP
2149
	 * on, we may see slight flicker as the panel syncs with the eDP
Line 2080... Line 2181...
2080
static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
2181
static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
2081
{
2182
{
2082
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2183
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2083
	struct drm_i915_private *dev_priv = dev->dev_private;
2184
	struct drm_i915_private *dev_priv = dev->dev_private;
2084
	u32 pp;
2185
	u32 pp;
2085
	u32 pp_ctrl_reg;
2186
	i915_reg_t pp_ctrl_reg;
Line 2086... Line 2187...
2086
 
2187
 
2087
	if (!is_edp(intel_dp))
2188
	if (!is_edp(intel_dp))
Line 2088... Line 2189...
2088
		return;
2189
		return;
Line 2139... Line 2240...
2139
		_intel_edp_backlight_on(intel_dp);
2240
		_intel_edp_backlight_on(intel_dp);
2140
	else
2241
	else
2141
		_intel_edp_backlight_off(intel_dp);
2242
		_intel_edp_backlight_off(intel_dp);
2142
}
2243
}
Line -... Line 2244...
-
 
2244
 
-
 
2245
static const char *state_string(bool enabled)
-
 
2246
{
-
 
2247
	return enabled ? "on" : "off";
-
 
2248
}
-
 
2249
 
-
 
2250
static void assert_dp_port(struct intel_dp *intel_dp, bool state)
-
 
2251
{
-
 
2252
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-
 
2253
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
-
 
2254
	bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;
-
 
2255
 
-
 
2256
	I915_STATE_WARN(cur_state != state,
-
 
2257
			"DP port %c state assertion failure (expected %s, current %s)\n",
-
 
2258
			port_name(dig_port->port),
-
 
2259
			state_string(state), state_string(cur_state));
-
 
2260
}
-
 
2261
#define assert_dp_port_disabled(d) assert_dp_port((d), false)
-
 
2262
 
-
 
2263
static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
-
 
2264
{
-
 
2265
	bool cur_state = I915_READ(DP_A) & DP_PLL_ENABLE;
-
 
2266
 
-
 
2267
	I915_STATE_WARN(cur_state != state,
-
 
2268
			"eDP PLL state assertion failure (expected %s, current %s)\n",
-
 
2269
			state_string(state), state_string(cur_state));
-
 
2270
}
-
 
2271
#define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
-
 
2272
#define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
2143
 
2273
 
2144
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
2274
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
2145
{
2275
{
2146
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2276
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2147
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
-
 
2148
	struct drm_device *dev = crtc->dev;
2277
	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
2149
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
Line -... Line 2278...
-
 
2278
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
 
2279
 
2150
	u32 dpa_ctl;
2280
	assert_pipe_disabled(dev_priv, crtc->pipe);
-
 
2281
	assert_dp_port_disabled(intel_dp);
-
 
2282
	assert_edp_pll_disabled(dev_priv);
2151
 
2283
 
-
 
2284
	DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n",
-
 
2285
		      crtc->config->port_clock);
-
 
2286
 
-
 
2287
	intel_dp->DP &= ~DP_PLL_FREQ_MASK;
-
 
2288
 
-
 
2289
	if (crtc->config->port_clock == 162000)
-
 
2290
		intel_dp->DP |= DP_PLL_FREQ_162MHZ;
-
 
2291
	else
-
 
2292
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;
-
 
2293
 
-
 
2294
	I915_WRITE(DP_A, intel_dp->DP);
Line 2152... Line -...
2152
	assert_pipe_disabled(dev_priv,
-
 
2153
			     to_intel_crtc(crtc)->pipe);
-
 
2154
 
-
 
2155
	DRM_DEBUG_KMS("\n");
-
 
2156
	dpa_ctl = I915_READ(DP_A);
-
 
2157
	WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
-
 
2158
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
-
 
2159
 
-
 
2160
	/* We don't adjust intel_dp->DP while tearing down the link, to
-
 
2161
	 * facilitate link retraining (e.g. after hotplug). Hence clear all
2295
	POSTING_READ(DP_A);
-
 
2296
	udelay(500);
2162
	 * enable bits here to ensure that we don't enable too much. */
2297
 
2163
	intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
2298
	intel_dp->DP |= DP_PLL_ENABLE;
2164
	intel_dp->DP |= DP_PLL_ENABLE;
2299
 
2165
	I915_WRITE(DP_A, intel_dp->DP);
2300
	I915_WRITE(DP_A, intel_dp->DP);
Line 2166... Line 2301...
2166
	POSTING_READ(DP_A);
2301
	POSTING_READ(DP_A);
2167
	udelay(200);
2302
	udelay(200);
2168
}
2303
}
2169
 
2304
 
2170
static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
2305
static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
-
 
2306
{
2171
{
2307
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2172
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2308
	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
-
 
2309
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
Line 2173... Line 2310...
2173
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2310
 
2174
	struct drm_device *dev = crtc->dev;
-
 
Line 2175... Line -...
2175
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
2176
	u32 dpa_ctl;
2311
	assert_pipe_disabled(dev_priv, crtc->pipe);
2177
 
-
 
2178
	assert_pipe_disabled(dev_priv,
-
 
2179
			     to_intel_crtc(crtc)->pipe);
2312
	assert_dp_port_disabled(intel_dp);
2180
 
-
 
2181
	dpa_ctl = I915_READ(DP_A);
-
 
2182
	WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
-
 
2183
	     "dp pll off, should be on\n");
-
 
2184
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2313
	assert_edp_pll_enabled(dev_priv);
2185
 
2314
 
2186
	/* We can't rely on the value tracked for the DP register in
2315
	DRM_DEBUG_KMS("disabling eDP PLL\n");
2187
	 * intel_dp->DP because link_down must not change that (otherwise link
2316
 
Line 2188... Line 2317...
2188
	 * re-training will fail. */
2317
	intel_dp->DP &= ~DP_PLL_ENABLE;
Line 2230... Line 2359...
2230
	enum port port = dp_to_dig_port(intel_dp)->port;
2359
	enum port port = dp_to_dig_port(intel_dp)->port;
2231
	struct drm_device *dev = encoder->base.dev;
2360
	struct drm_device *dev = encoder->base.dev;
2232
	struct drm_i915_private *dev_priv = dev->dev_private;
2361
	struct drm_i915_private *dev_priv = dev->dev_private;
2233
	enum intel_display_power_domain power_domain;
2362
	enum intel_display_power_domain power_domain;
2234
	u32 tmp;
2363
	u32 tmp;
-
 
2364
	bool ret;
Line 2235... Line 2365...
2235
 
2365
 
2236
	power_domain = intel_display_port_power_domain(encoder);
2366
	power_domain = intel_display_port_power_domain(encoder);
2237
	if (!intel_display_power_is_enabled(dev_priv, power_domain))
2367
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
Line -... Line 2368...
-
 
2368
		return false;
-
 
2369
 
2238
		return false;
2370
	ret = false;
Line 2239... Line 2371...
2239
 
2371
 
2240
	tmp = I915_READ(intel_dp->output_reg);
2372
	tmp = I915_READ(intel_dp->output_reg);
Line 2241... Line 2373...
2241
 
2373
 
2242
	if (!(tmp & DP_PORT_EN))
2374
	if (!(tmp & DP_PORT_EN))
2243
		return false;
2375
		goto out;
2244
 
2376
 
Line 2245... Line 2377...
2245
	if (IS_GEN7(dev) && port == PORT_A) {
2377
	if (IS_GEN7(dev) && port == PORT_A) {
2246
		*pipe = PORT_TO_PIPE_CPT(tmp);
2378
		*pipe = PORT_TO_PIPE_CPT(tmp);
2247
	} else if (HAS_PCH_CPT(dev) && port != PORT_A) {
2379
	} else if (HAS_PCH_CPT(dev) && port != PORT_A) {
2248
		enum pipe p;
2380
		enum pipe p;
2249
 
2381
 
-
 
2382
		for_each_pipe(dev_priv, p) {
-
 
2383
			u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
2250
		for_each_pipe(dev_priv, p) {
2384
			if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
2251
			u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
2385
				*pipe = p;
Line 2252... Line 2386...
2252
			if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
2386
				ret = true;
2253
				*pipe = p;
2387
 
2254
				return true;
2388
				goto out;
2255
			}
2389
			}
2256
		}
2390
		}
2257
 
2391
 
2258
		DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2392
		DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
Line 2259... Line 2393...
2259
			      intel_dp->output_reg);
2393
			      i915_mmio_reg_offset(intel_dp->output_reg));
-
 
2394
	} else if (IS_CHERRYVIEW(dev)) {
-
 
2395
		*pipe = DP_PORT_TO_PIPE_CHV(tmp);
-
 
2396
	} else {
-
 
2397
		*pipe = PORT_TO_PIPE(tmp);
-
 
2398
	}
2260
	} else if (IS_CHERRYVIEW(dev)) {
2399
 
Line 2261... Line 2400...
2261
		*pipe = DP_PORT_TO_PIPE_CHV(tmp);
2400
	ret = true;
2262
	} else {
2401
 
2263
		*pipe = PORT_TO_PIPE(tmp);
2402
out:
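The readout above now brackets the register access in a conditional power reference: intel_display_power_get_if_enabled() only succeeds when the relevant power well is already up, and every exit path funnels through the out label so the reference is dropped exactly once. A condensed sketch of that shape, assuming the power-domain helpers behave as in the upstream driver (the actual register decode is replaced by placeholders):

    static bool port_get_hw_state_sketch(struct drm_i915_private *dev_priv,
                                         struct intel_encoder *encoder,
                                         enum pipe *pipe)
    {
            enum intel_display_power_domain power_domain;
            bool port_enabled = false;   /* placeholder for the DP_PORT_EN check */
            bool ret = false;

            power_domain = intel_display_port_power_domain(encoder);
            if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
                    return false;        /* well is off: nothing to read, no ref taken */

            if (!port_enabled)
                    goto out;

            *pipe = PIPE_A;              /* placeholder for the real pipe decode */
            ret = true;
    out:
            intel_display_power_put(dev_priv, power_domain);
            return ret;
    }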
Line 2306... Line 2445...
2306
	}
2445
	}
Line 2307... Line 2446...
2307
 
2446
 
Line 2308... Line 2447...
2308
	pipe_config->base.adjusted_mode.flags |= flags;
2447
	pipe_config->base.adjusted_mode.flags |= flags;
2309
 
2448
 
2310
	if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
2449
	if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
Line 2311... Line 2450...
2311
	    tmp & DP_COLOR_RANGE_16_235)
2450
	    !IS_CHERRYVIEW(dev) && tmp & DP_COLOR_RANGE_16_235)
Line 2312... Line 2451...
2312
		pipe_config->limited_color_range = true;
2451
		pipe_config->limited_color_range = true;
2313
 
2452
 
Line 2314... Line 2453...
2314
	pipe_config->has_dp_encoder = true;
2453
	pipe_config->has_dp_encoder = true;
Line 2315... Line 2454...
2315
 
2454
 
2316
	pipe_config->lane_count =
2455
	pipe_config->lane_count =
2317
		((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
2456
		((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
2318
 
2457
 
2319
	intel_dp_get_m_n(crtc, pipe_config);
2458
	intel_dp_get_m_n(crtc, pipe_config);
2320
 
2459
 
Line 2382... Line 2521...
2382
{
2521
{
2383
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2522
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2384
	enum port port = dp_to_dig_port(intel_dp)->port;
2523
	enum port port = dp_to_dig_port(intel_dp)->port;
Line 2385... Line 2524...
2385
 
2524
 
-
 
2525
	intel_dp_link_down(intel_dp);
-
 
2526
 
2386
	intel_dp_link_down(intel_dp);
2527
	/* Only ilk+ has port A */
2387
	if (port == PORT_A)
2528
	if (port == PORT_A)
2388
		ironlake_edp_pll_off(intel_dp);
2529
		ironlake_edp_pll_off(intel_dp);
Line 2389... Line 2530...
2389
}
2530
}
Line 2541... Line 2682...
2541
 
2682
 
2542
static void intel_dp_enable_port(struct intel_dp *intel_dp)
2683
static void intel_dp_enable_port(struct intel_dp *intel_dp)
2543
{
2684
{
2544
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2685
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
-
 
2686
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
2687
	struct intel_crtc *crtc =
Line 2545... Line 2688...
2545
	struct drm_i915_private *dev_priv = dev->dev_private;
2688
		to_intel_crtc(dp_to_dig_port(intel_dp)->base.base.crtc);
2546
 
2689
 
2547
	/* enable with pattern 1 (as per spec) */
2690
	/* enable with pattern 1 (as per spec) */
Line 2556... Line 2699...
2556
	 * without actually enabling the port, and then do another
2699
	 * without actually enabling the port, and then do another
2557
	 * write to enable the port. Otherwise link training will
2700
	 * write to enable the port. Otherwise link training will
2558
	 * fail when the power sequencer is freshly used for this port.
2701
	 * fail when the power sequencer is freshly used for this port.
2559
	 */
2702
	 */
2560
	intel_dp->DP |= DP_PORT_EN;
2703
	intel_dp->DP |= DP_PORT_EN;
-
 
2704
	if (crtc->config->has_audio)
-
 
2705
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
Line 2561... Line 2706...
2561
 
2706
 
2562
	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2707
	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2563
	POSTING_READ(intel_dp->output_reg);
2708
	POSTING_READ(intel_dp->output_reg);
Line 2568... Line 2713...
2568
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2713
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2569
	struct drm_device *dev = encoder->base.dev;
2714
	struct drm_device *dev = encoder->base.dev;
2570
	struct drm_i915_private *dev_priv = dev->dev_private;
2715
	struct drm_i915_private *dev_priv = dev->dev_private;
2571
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2716
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2572
	uint32_t dp_reg = I915_READ(intel_dp->output_reg);
2717
	uint32_t dp_reg = I915_READ(intel_dp->output_reg);
-
 
2718
	enum port port = dp_to_dig_port(intel_dp)->port;
-
 
2719
	enum pipe pipe = crtc->pipe;
Line 2573... Line 2720...
2573
 
2720
 
2574
	if (WARN_ON(dp_reg & DP_PORT_EN))
2721
	if (WARN_ON(dp_reg & DP_PORT_EN))
Line 2575... Line 2722...
2575
		return;
2722
		return;
Line 2576... Line 2723...
2576
 
2723
 
2577
	pps_lock(intel_dp);
2724
	pps_lock(intel_dp);
Line -... Line 2725...
-
 
2725
 
-
 
2726
	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
-
 
2727
		vlv_init_panel_power_sequencer(intel_dp);
-
 
2728
 
-
 
2729
	/*
-
 
2730
	 * We get an occasional spurious underrun between the port
-
 
2731
	 * enable and vdd enable, when enabling port A eDP.
-
 
2732
	 *
-
 
2733
	 * FIXME: Not sure if this applies to (PCH) port D eDP as well
2578
 
2734
	 */
Line -... Line 2735...
-
 
2735
	if (port == PORT_A)
-
 
2736
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
-
 
2737
 
-
 
2738
	intel_dp_enable_port(intel_dp);
-
 
2739
 
-
 
2740
	if (port == PORT_A && IS_GEN5(dev_priv)) {
-
 
2741
		/*
-
 
2742
		 * Underrun reporting for the other pipe was disabled in
-
 
2743
		 * g4x_pre_enable_dp(). The eDP PLL and port have now been
-
 
2744
		 * enabled, so it's now safe to re-enable underrun reporting.
-
 
2745
		 */
2579
	if (IS_VALLEYVIEW(dev))
2746
		intel_wait_for_vblank_if_active(dev_priv->dev, !pipe);
2580
		vlv_init_panel_power_sequencer(intel_dp);
2747
		intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, true);
2581
 
2748
		intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, true);
Line -... Line 2749...
-
 
2749
	}
-
 
2750
 
-
 
2751
	edp_panel_vdd_on(intel_dp);
2582
	intel_dp_enable_port(intel_dp);
2752
	edp_panel_on(intel_dp);
Line 2583... Line 2753...
2583
 
2753
	edp_panel_vdd_off(intel_dp, true);
2584
	edp_panel_vdd_on(intel_dp);
2754
 
Line 2585... Line 2755...
2585
	edp_panel_on(intel_dp);
2755
	if (port == PORT_A)
2586
	edp_panel_vdd_off(intel_dp, true);
2756
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
Line 2601... Line 2771...
2601
	intel_dp_start_link_train(intel_dp);
2771
	intel_dp_start_link_train(intel_dp);
2602
	intel_dp_stop_link_train(intel_dp);
2772
	intel_dp_stop_link_train(intel_dp);
Line 2603... Line 2773...
2603
 
2773
 
2604
	if (crtc->config->has_audio) {
2774
	if (crtc->config->has_audio) {
2605
		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2775
		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2606
				 pipe_name(crtc->pipe));
2776
				 pipe_name(pipe));
2607
		intel_audio_codec_enable(encoder);
2777
		intel_audio_codec_enable(encoder);
2608
	}
2778
	}
Line 2609... Line 2779...
2609
}
2779
}
Line 2624... Line 2794...
2624
	intel_psr_enable(intel_dp);
2794
	intel_psr_enable(intel_dp);
2625
}
2795
}
Line 2626... Line 2796...
2626
 
2796
 
2627
static void g4x_pre_enable_dp(struct intel_encoder *encoder)
2797
static void g4x_pre_enable_dp(struct intel_encoder *encoder)
-
 
2798
{
2628
{
2799
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2629
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2800
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
-
 
2801
	enum port port = dp_to_dig_port(intel_dp)->port;
Line 2630... Line 2802...
2630
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2802
	enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
Line -... Line 2803...
-
 
2803
 
-
 
2804
	intel_dp_prepare(encoder);
-
 
2805
 
-
 
2806
	if (port == PORT_A && IS_GEN5(dev_priv)) {
-
 
2807
		/*
-
 
2808
		 * We get FIFO underruns on the other pipe when
-
 
2809
		 * enabling the CPU eDP PLL, and when enabling CPU
-
 
2810
		 * eDP port. We could potentially avoid the PLL
-
 
2811
		 * underrun with a vblank wait just prior to enabling
-
 
2812
		 * the PLL, but that doesn't appear to help the port
-
 
2813
		 * enable case. Just sweep it all under the rug.
-
 
2814
		 */
-
 
2815
		intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, false);
2631
 
2816
		intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, false);
2632
	intel_dp_prepare(encoder);
2817
	}
2633
 
-
 
2634
	/* Only ilk+ has port A */
2818
 
2635
	if (dport->port == PORT_A) {
2819
	/* Only ilk+ has port A */
2636
		ironlake_set_pll_cpu_edp(intel_dp);
-
 
Line 2637... Line 2820...
2637
		ironlake_edp_pll_on(intel_dp);
2820
	if (port == PORT_A)
2638
	}
2821
		ironlake_edp_pll_on(intel_dp);
2639
}
2822
	}
2640
 
2823
 
2641
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2824
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2642
{
2825
{
Line 2643... Line 2826...
2643
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2826
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
Line 2644... Line 2827...
2644
	struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2827
	struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2645
	enum pipe pipe = intel_dp->pps_pipe;
2828
	enum pipe pipe = intel_dp->pps_pipe;
Line 2673... Line 2856...
2673
	lockdep_assert_held(&dev_priv->pps_mutex);
2856
	lockdep_assert_held(&dev_priv->pps_mutex);
Line 2674... Line 2857...
2674
 
2857
 
2675
	if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2858
	if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
Line 2676... Line 2859...
2676
		return;
2859
		return;
2677
 
-
 
2678
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2860
 
2679
			    base.head) {
2861
	for_each_intel_encoder(dev, encoder) {
Line 2680... Line 2862...
2680
		struct intel_dp *intel_dp;
2862
		struct intel_dp *intel_dp;
2681
		enum port port;
2863
		enum port port;
Line 3039... Line 3221...
3039
 
3221
 
3040
/*
3222
/*
3041
 * Fetch AUX CH registers 0x202 - 0x207 which contain
3223
 * Fetch AUX CH registers 0x202 - 0x207 which contain
3042
 * link status information
3224
 * link status information
3043
 */
3225
 */
3044
static bool
3226
bool
3045
intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
3227
intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
3046
{
3228
{
3047
	return intel_dp_dpcd_read_wake(&intel_dp->aux,
3229
	return intel_dp_dpcd_read_wake(&intel_dp->aux,
3048
				       DP_LANE0_1_STATUS,
3230
				       DP_LANE0_1_STATUS,
3049
				       link_status,
3231
				       link_status,
3050
				       DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
3232
				       DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
Line 3051... Line 3233...
3051
}
3233
}
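intel_dp_get_link_status() above just pulls DPCD registers 0x202-0x207 into a six-byte buffer; callers hand that buffer to the generic DRM helpers to decide whether the link is still trained. A hedged usage sketch (drm_dp_clock_recovery_ok() and drm_dp_channel_eq_ok() are the standard drm_dp_helper predicates; treat the exact retraining policy as illustrative):

    static void check_link_sketch(struct intel_dp *intel_dp)
    {
            uint8_t link_status[DP_LINK_STATUS_SIZE];

            if (!intel_dp_get_link_status(intel_dp, link_status))
                    return;              /* AUX read failed, nothing to evaluate */

            if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count))
                    DRM_DEBUG_KMS("clock recovery lost, full retrain needed\n");
            else if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count))
                    DRM_DEBUG_KMS("channel EQ lost, retrain\n");
    }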
3052
 
3234
 
3053
/* These are source-specific values. */
3235
/* These are source-specific values. */
3054
static uint8_t
3236
uint8_t
3055
intel_dp_voltage_max(struct intel_dp *intel_dp)
3237
intel_dp_voltage_max(struct intel_dp *intel_dp)
3056
{
3238
{
3057
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
3239
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
Line 3062... Line 3244...
3062
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3244
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3063
	else if (INTEL_INFO(dev)->gen >= 9) {
3245
	else if (INTEL_INFO(dev)->gen >= 9) {
3064
		if (dev_priv->edp_low_vswing && port == PORT_A)
3246
		if (dev_priv->edp_low_vswing && port == PORT_A)
3065
			return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3247
			return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3066
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3248
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3067
	} else if (IS_VALLEYVIEW(dev))
3249
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
3068
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3250
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3069
	else if (IS_GEN7(dev) && port == PORT_A)
3251
	else if (IS_GEN7(dev) && port == PORT_A)
3070
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3252
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3071
	else if (HAS_PCH_CPT(dev) && port != PORT_A)
3253
	else if (HAS_PCH_CPT(dev) && port != PORT_A)
3072
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3254
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3073
	else
3255
	else
3074
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3256
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3075
}
3257
}
Line 3076... Line 3258...
3076
 
3258
 
3077
static uint8_t
3259
uint8_t
3078
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
3260
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
3079
{
3261
{
3080
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
3262
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
Line 3103... Line 3285...
3103
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
3285
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
3104
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3286
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3105
		default:
3287
		default:
3106
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
3288
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
3107
		}
3289
		}
3108
	} else if (IS_VALLEYVIEW(dev)) {
3290
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
3109
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3291
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3110
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3292
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3111
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
3293
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
3112
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3294
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3113
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
3295
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
Line 3414... Line 3596...
3414
	mutex_unlock(&dev_priv->sb_lock);
3596
	mutex_unlock(&dev_priv->sb_lock);
Line 3415... Line 3597...
3415
 
3597
 
3416
	return 0;
3598
	return 0;
Line 3417... Line -...
3417
}
-
 
3418
 
-
 
3419
static void
-
 
3420
intel_get_adjust_train(struct intel_dp *intel_dp,
-
 
3421
		       const uint8_t link_status[DP_LINK_STATUS_SIZE])
-
 
3422
{
-
 
3423
	uint8_t v = 0;
-
 
3424
	uint8_t p = 0;
-
 
3425
	int lane;
-
 
3426
	uint8_t voltage_max;
-
 
3427
	uint8_t preemph_max;
-
 
3428
 
-
 
3429
	for (lane = 0; lane < intel_dp->lane_count; lane++) {
-
 
3430
		uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
-
 
3431
		uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
-
 
3432
 
-
 
3433
		if (this_v > v)
-
 
3434
			v = this_v;
-
 
3435
		if (this_p > p)
-
 
3436
			p = this_p;
-
 
3437
	}
-
 
3438
 
-
 
3439
	voltage_max = intel_dp_voltage_max(intel_dp);
-
 
3440
	if (v >= voltage_max)
-
 
3441
		v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
-
 
3442
 
-
 
3443
	preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
-
 
3444
	if (p >= preemph_max)
-
 
3445
		p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
-
 
3446
 
-
 
3447
	for (lane = 0; lane < 4; lane++)
-
 
3448
		intel_dp->train_set[lane] = v | p;
-
 
3449
}
3599
}
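The removed helper above reduces the per-lane adjust requests to a single train_set byte; a condensed sketch of the same reduction (illustrative only, hypothetical name, same drm_dp accessors and clamping as the original):

static uint8_t worst_case_adjust(struct intel_dp *intel_dp,
				 const uint8_t link_status[DP_LINK_STATUS_SIZE])
{
	uint8_t v = 0, p = 0, voltage_max, preemph_max;
	int lane;

	/* Take the largest swing/pre-emphasis any lane asked for. */
	for (lane = 0; lane < intel_dp->lane_count; lane++) {
		v = max(v, drm_dp_get_adjust_request_voltage(link_status, lane));
		p = max(p, drm_dp_get_adjust_request_pre_emphasis(link_status, lane));
	}

	/* Clamp to the source limits and flag that the maximum was reached. */
	voltage_max = intel_dp_voltage_max(intel_dp);
	if (v >= voltage_max)
		v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
	preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
	if (p >= preemph_max)
		p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;

	return v | p;
}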
3450
 
3600
 
3451
static uint32_t
3601
static uint32_t
3452
gen4_signal_levels(uint8_t train_set)
3602
gen4_signal_levels(uint8_t train_set)
Line 3543... Line 3693...
3543
			      "0x%x\n", signal_levels);
3693
			      "0x%x\n", signal_levels);
3544
		return EDP_LINK_TRAIN_500MV_0DB_IVB;
3694
		return EDP_LINK_TRAIN_500MV_0DB_IVB;
3545
	}
3695
	}
3546
}
3696
}
Line 3547... Line -...
3547
 
-
 
3548
/* Properly updates "DP" with the correct signal levels. */
3697
 
3549
static void
3698
void
3550
intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
3699
intel_dp_set_signal_levels(struct intel_dp *intel_dp)
3551
{
3700
{
3552
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3701
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3553
	enum port port = intel_dig_port->port;
3702
	enum port port = intel_dig_port->port;
-
 
3703
	struct drm_device *dev = intel_dig_port->base.base.dev;
3554
	struct drm_device *dev = intel_dig_port->base.base.dev;
3704
	struct drm_i915_private *dev_priv = to_i915(dev);
3555
	uint32_t signal_levels, mask = 0;
3705
	uint32_t signal_levels, mask = 0;
Line 3556... Line 3706...
3556
	uint8_t train_set = intel_dp->train_set[0];
3706
	uint8_t train_set = intel_dp->train_set[0];
3557
 
3707
 
Line 3584... Line 3734...
3584
		train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3734
		train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3585
	DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3735
	DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3586
		(train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3736
		(train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3587
			DP_TRAIN_PRE_EMPHASIS_SHIFT);
3737
			DP_TRAIN_PRE_EMPHASIS_SHIFT);
Line 3588... Line 3738...
3588
 
3738
 
3589
	*DP = (*DP & ~mask) | signal_levels;
-
 
3590
}
-
 
3591
 
-
 
3592
static bool
-
 
3593
intel_dp_set_link_train(struct intel_dp *intel_dp,
-
 
3594
			uint32_t *DP,
-
 
3595
			uint8_t dp_train_pat)
-
 
3596
{
-
 
3597
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-
 
3598
	struct drm_i915_private *dev_priv =
-
 
3599
		to_i915(intel_dig_port->base.base.dev);
-
 
3600
	uint8_t buf[sizeof(intel_dp->train_set) + 1];
-
 
3601
	int ret, len;
-
 
3602
 
-
 
Line 3603... Line 3739...
3603
	_intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3739
	intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;
3604
 
3740
 
3605
	I915_WRITE(intel_dp->output_reg, *DP);
-
 
3606
	POSTING_READ(intel_dp->output_reg);
-
 
3607
 
-
 
3608
	buf[0] = dp_train_pat;
-
 
3609
	if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
-
 
3610
	    DP_TRAINING_PATTERN_DISABLE) {
-
 
3611
		/* don't write DP_TRAINING_LANEx_SET on disable */
-
 
3612
		len = 1;
-
 
3613
	} else {
-
 
3614
		/* DP_TRAINING_LANEx_SET follows DP_TRAINING_PATTERN_SET */
-
 
3615
		memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
-
 
3616
		len = intel_dp->lane_count + 1;
-
 
3617
	}
-
 
3618
 
-
 
3619
	ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
-
 
3620
				buf, len);
-
 
3621
 
3741
	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
Line 3622... Line 3742...
3622
	return ret == len;
3742
	POSTING_READ(intel_dp->output_reg);
3623
}
3743
	}
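For reference, the removed helper writes DP_TRAINING_PATTERN_SET (DPCD 0x102) and the per-lane DP_TRAINING_LANEx_SET bytes in a single AUX burst, since they sit contiguously in the DPCD. A self-contained sketch of that layout (hypothetical helper name):

static bool write_training_pattern(struct intel_dp *intel_dp, uint8_t dp_train_pat)
{
	uint8_t buf[5];	/* pattern byte plus up to four lane settings */
	int len = 1;

	buf[0] = dp_train_pat;
	if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) != DP_TRAINING_PATTERN_DISABLE) {
		/* DP_TRAINING_LANEx_SET follows DP_TRAINING_PATTERN_SET. */
		memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
		len += intel_dp->lane_count;
	}

	return drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
				 buf, len) == len;
}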
3624
 
3744
 
3625
static bool
3745
void
3626
intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
-
 
3627
			uint8_t dp_train_pat)
-
 
3628
{
-
 
3629
		memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
-
 
3630
	intel_dp_set_signal_levels(intel_dp, DP);
-
 
3631
	return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
-
 
3632
}
-
 
3633
 
-
 
3634
static bool
-
 
3635
intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3746
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
3636
			   const uint8_t link_status[DP_LINK_STATUS_SIZE])
3747
			uint8_t dp_train_pat)
3637
{
3748
{
3638
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-
 
Line 3639... Line 3749...
3639
	struct drm_i915_private *dev_priv =
3749
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3640
		to_i915(intel_dig_port->base.base.dev);
-
 
Line 3641... Line 3750...
3641
	int ret;
3750
	struct drm_i915_private *dev_priv =
3642
 
3751
		to_i915(intel_dig_port->base.base.dev);
3643
	intel_get_adjust_train(intel_dp, link_status);
-
 
3644
	intel_dp_set_signal_levels(intel_dp, DP);
-
 
3645
 
-
 
3646
	I915_WRITE(intel_dp->output_reg, *DP);
-
 
3647
	POSTING_READ(intel_dp->output_reg);
-
 
3648
 
3752
 
Line 3649... Line 3753...
3649
	ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
3753
	_intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);
3650
				intel_dp->train_set, intel_dp->lane_count);
3754
 
3651
 
3755
	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3652
	return ret == intel_dp->lane_count;
3756
	POSTING_READ(intel_dp->output_reg);
3653
}
3757
}
3654
 
3758
 
Line 3681... Line 3785...
3681
	if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3785
	if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3682
		     1))
3786
		     1))
3683
		DRM_ERROR("Timed out waiting for DP idle patterns\n");
3787
		DRM_ERROR("Timed out waiting for DP idle patterns\n");
3684
}
3788
}
Line 3685... Line -...
3685
 
-
 
3686
/* Enable corresponding port and start training pattern 1 */
-
 
3687
static void
-
 
3688
intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
-
 
3689
{
-
 
3690
	struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
-
 
3691
	struct drm_device *dev = encoder->dev;
-
 
3692
	int i;
-
 
3693
	uint8_t voltage;
-
 
3694
	int voltage_tries, loop_tries;
-
 
3695
	uint32_t DP = intel_dp->DP;
-
 
3696
	uint8_t link_config[2];
-
 
3697
	uint8_t link_bw, rate_select;
-
 
3698
 
-
 
3699
	if (HAS_DDI(dev))
-
 
3700
		intel_ddi_prepare_link_retrain(encoder);
-
 
3701
 
-
 
3702
	intel_dp_compute_rate(intel_dp, intel_dp->link_rate,
-
 
3703
			      &link_bw, &rate_select);
-
 
3704
 
-
 
3705
	/* Write the link configuration data */
-
 
3706
	link_config[0] = link_bw;
-
 
3707
	link_config[1] = intel_dp->lane_count;
-
 
3708
	if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
-
 
3709
		link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
-
 
3710
	drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
-
 
3711
	if (intel_dp->num_sink_rates)
-
 
3712
		drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
-
 
3713
				  &rate_select, 1);
-
 
3714
 
-
 
3715
	link_config[0] = 0;
-
 
3716
	link_config[1] = DP_SET_ANSI_8B10B;
-
 
3717
	drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
-
 
3718
 
-
 
3719
	DP |= DP_PORT_EN;
-
 
3720
 
-
 
3721
	/* clock recovery */
-
 
3722
	if (!intel_dp_reset_link_train(intel_dp, &DP,
-
 
3723
				       DP_TRAINING_PATTERN_1 |
-
 
3724
				       DP_LINK_SCRAMBLING_DISABLE)) {
-
 
3725
		DRM_ERROR("failed to enable link training\n");
-
 
3726
		return;
-
 
3727
	}
-
 
3728
 
-
 
3729
	voltage = 0xff;
-
 
3730
	voltage_tries = 0;
-
 
3731
	loop_tries = 0;
-
 
3732
	for (;;) {
-
 
3733
		uint8_t link_status[DP_LINK_STATUS_SIZE];
-
 
3734
 
-
 
3735
		drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
-
 
3736
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
-
 
3737
			DRM_ERROR("failed to get link status\n");
-
 
3738
			break;
-
 
3739
		}
-
 
3740
 
-
 
3741
		if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
-
 
3742
			DRM_DEBUG_KMS("clock recovery OK\n");
-
 
3743
			break;
-
 
3744
		}
-
 
3745
 
-
 
3746
 
-
 
3747
		/* Check to see if we've tried the max voltage */
-
 
3748
		for (i = 0; i < intel_dp->lane_count; i++)
-
 
3749
			if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
-
 
3750
				break;
-
 
3751
		if (i == intel_dp->lane_count) {
-
 
3752
			++loop_tries;
-
 
3753
			if (loop_tries == 5) {
-
 
3754
				DRM_ERROR("too many full retries, give up\n");
-
 
3755
				break;
-
 
3756
			}
-
 
3757
			intel_dp_reset_link_train(intel_dp, &DP,
-
 
3758
						  DP_TRAINING_PATTERN_1 |
-
 
3759
						  DP_LINK_SCRAMBLING_DISABLE);
-
 
3760
			voltage_tries = 0;
-
 
3761
			continue;
-
 
3762
		}
-
 
3763
 
-
 
3764
		/* Check to see if we've tried the same voltage 5 times */
-
 
3765
		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
-
 
3766
			++voltage_tries;
-
 
3767
			if (voltage_tries == 5) {
-
 
3768
				DRM_ERROR("too many voltage retries, give up\n");
-
 
3769
				break;
-
 
3770
			}
-
 
3771
		} else
-
 
3772
			voltage_tries = 0;
-
 
3773
		voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
-
 
3774
 
-
 
3775
		/* Update training set as requested by target */
-
 
3776
		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
-
 
3777
			DRM_ERROR("failed to update link training\n");
-
 
3778
			break;
-
 
3779
		}
-
 
3780
	}
-
 
3781
 
-
 
3782
	intel_dp->DP = DP;
-
 
3783
}
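The loop above bounds the retries two ways: five attempts at the same voltage swing, and five full resets once every lane reports DP_TRAIN_MAX_SWING_REACHED. The bookkeeping on its own, as a plain-C sketch with the same thresholds (hypothetical helper):

static bool keep_trying_clock_recovery(uint8_t vswing, uint8_t *last_vswing,
				       int *voltage_tries, int *loop_tries,
				       bool all_lanes_at_max)
{
	if (all_lanes_at_max) {
		/* Every lane maxed out: reset training and count a full retry. */
		if (++(*loop_tries) == 5)
			return false;		/* "too many full retries" */
		*voltage_tries = 0;
		return true;
	}

	if (vswing == *last_vswing) {
		if (++(*voltage_tries) == 5)
			return false;		/* "too many voltage retries" */
	} else {
		*voltage_tries = 0;
	}
	*last_vswing = vswing;
	return true;
}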
-
 
3784
 
-
 
3785
static void
-
 
3786
intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
-
 
3787
{
-
 
3788
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-
 
3789
	struct drm_device *dev = dig_port->base.base.dev;
-
 
3790
	bool channel_eq = false;
-
 
3791
	int tries, cr_tries;
-
 
3792
	uint32_t DP = intel_dp->DP;
-
 
3793
	uint32_t training_pattern = DP_TRAINING_PATTERN_2;
-
 
3794
 
-
 
3795
	/*
-
 
3796
	 * Training Pattern 3 for HBR2 or 1.2 devices that support it.
-
 
3797
	 *
-
 
3798
	 * Intel platforms that support HBR2 also support TPS3. TPS3 support is
-
 
3799
	 * also mandatory for downstream devices that support HBR2.
-
 
3800
	 *
-
 
3801
	 * Due to WaDisableHBR2, SKL < B0 is the only exception where TPS3 is
-
 
3802
	 * supported but still not enabled.
-
 
3803
	 */
-
 
3804
	if (intel_dp_source_supports_hbr2(dev) &&
-
 
3805
	    drm_dp_tps3_supported(intel_dp->dpcd))
-
 
3806
		training_pattern = DP_TRAINING_PATTERN_3;
-
 
3807
	else if (intel_dp->link_rate == 540000)
-
 
3808
		DRM_ERROR("5.4 Gbps link rate without HBR2/TPS3 support\n");
-
 
3809
 
-
 
3810
	/* channel equalization */
-
 
3811
	if (!intel_dp_set_link_train(intel_dp, &DP,
-
 
3812
				     training_pattern |
-
 
3813
				     DP_LINK_SCRAMBLING_DISABLE)) {
-
 
3814
		DRM_ERROR("failed to start channel equalization\n");
-
 
3815
		return;
-
 
3816
	}
-
 
3817
 
-
 
3818
	tries = 0;
-
 
3819
	cr_tries = 0;
-
 
3820
	channel_eq = false;
-
 
3821
	for (;;) {
-
 
3822
		uint8_t link_status[DP_LINK_STATUS_SIZE];
-
 
3823
 
-
 
3824
		if (cr_tries > 5) {
-
 
3825
			DRM_ERROR("failed to train DP, aborting\n");
-
 
3826
			break;
-
 
3827
		}
-
 
3828
 
-
 
3829
		drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
-
 
3830
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
-
 
3831
			DRM_ERROR("failed to get link status\n");
-
 
3832
			break;
-
 
3833
		}
-
 
3834
 
-
 
3835
		/* Make sure clock is still ok */
-
 
3836
		if (!drm_dp_clock_recovery_ok(link_status,
-
 
3837
					      intel_dp->lane_count)) {
-
 
3838
			intel_dp_link_training_clock_recovery(intel_dp);
-
 
3839
			intel_dp_set_link_train(intel_dp, &DP,
-
 
3840
						training_pattern |
-
 
3841
						DP_LINK_SCRAMBLING_DISABLE);
-
 
3842
			cr_tries++;
-
 
3843
			continue;
-
 
3844
		}
-
 
3845
 
-
 
3846
		if (drm_dp_channel_eq_ok(link_status,
-
 
3847
					 intel_dp->lane_count)) {
-
 
3848
			channel_eq = true;
-
 
3849
			break;
-
 
3850
		}
-
 
3851
 
-
 
3852
		/* Try 5 times, then try clock recovery if that fails */
-
 
3853
		if (tries > 5) {
-
 
3854
			intel_dp_link_training_clock_recovery(intel_dp);
-
 
3855
			intel_dp_set_link_train(intel_dp, &DP,
-
 
3856
						training_pattern |
-
 
3857
						DP_LINK_SCRAMBLING_DISABLE);
-
 
3858
			tries = 0;
-
 
3859
			cr_tries++;
-
 
3860
			continue;
-
 
3861
		}
-
 
3862
 
-
 
3863
		/* Update training set as requested by target */
-
 
3864
		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
-
 
3865
			DRM_ERROR("failed to update link training\n");
-
 
3866
			break;
-
 
3867
		}
-
 
3868
		++tries;
-
 
3869
	}
-
 
3870
 
-
 
3871
	intel_dp_set_idle_link_train(intel_dp);
-
 
3872
 
-
 
3873
	intel_dp->DP = DP;
-
 
3874
 
-
 
3875
	if (channel_eq)
-
 
3876
		DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
-
 
3877
	}
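One channel-equalization pass combines the helpers seen in this diff: select the pattern on the source side, honor the sink's published delay, then read back the lane status. Illustrative sketch only; the full flow also updates the sink's DPCD training registers, as the removed code above does:

static bool channel_eq_pass(struct intel_dp *intel_dp, uint8_t pattern)
{
	uint8_t link_status[DP_LINK_STATUS_SIZE];

	intel_dp_program_link_training_pattern(intel_dp, pattern);
	drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);

	if (!intel_dp_get_link_status(intel_dp, link_status))
		return false;

	return drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);
}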
-
 
3878
 
-
 
3879
void intel_dp_stop_link_train(struct intel_dp *intel_dp)
-
 
3880
{
-
 
3881
	intel_dp_set_link_train(intel_dp, &intel_dp->DP,
-
 
3882
				DP_TRAINING_PATTERN_DISABLE);
-
 
3883
}
-
 
3884
 
-
 
3885
void
-
 
3886
intel_dp_start_link_train(struct intel_dp *intel_dp)
-
 
3887
{
-
 
3888
	intel_dp_link_training_clock_recovery(intel_dp);
-
 
3889
	intel_dp_link_training_channel_equalization(intel_dp);
-
 
3890
}
-
 
3891
 
3789
 
3892
static void
3790
static void
3893
intel_dp_link_down(struct intel_dp *intel_dp)
3791
intel_dp_link_down(struct intel_dp *intel_dp)
3894
{
3792
{
3895
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3793
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
Line 3929... Line 3827...
3929
	 * HW workaround for IBX, we need to move the port
3827
	 * HW workaround for IBX, we need to move the port
3930
	 * to transcoder A after disabling it to allow the
3828
	 * to transcoder A after disabling it to allow the
3931
	 * matching HDMI port to be enabled on transcoder A.
3829
	 * matching HDMI port to be enabled on transcoder A.
3932
	 */
3830
	 */
3933
	if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
3831
	if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
-
 
3832
		/*
-
 
3833
		 * We get CPU/PCH FIFO underruns on the other pipe when
-
 
3834
		 * doing the workaround. Sweep them under the rug.
-
 
3835
		 */
-
 
3836
		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
-
 
3837
		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
-
 
3838
 
3934
		/* always enable with pattern 1 (as per spec) */
3839
		/* always enable with pattern 1 (as per spec) */
3935
		DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
3840
		DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
3936
		DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
3841
		DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
3937
		I915_WRITE(intel_dp->output_reg, DP);
3842
		I915_WRITE(intel_dp->output_reg, DP);
3938
		POSTING_READ(intel_dp->output_reg);
3843
		POSTING_READ(intel_dp->output_reg);
Line 3939... Line 3844...
3939
 
3844
 
3940
		DP &= ~DP_PORT_EN;
3845
		DP &= ~DP_PORT_EN;
3941
		I915_WRITE(intel_dp->output_reg, DP);
3846
		I915_WRITE(intel_dp->output_reg, DP);
-
 
3847
		POSTING_READ(intel_dp->output_reg);
-
 
3848
 
-
 
3849
		intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A);
-
 
3850
		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
3942
		POSTING_READ(intel_dp->output_reg);
3851
		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
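The added lines bracket the workaround with FIFO-underrun suppression; pulled out, the shape is as follows (an outline of the calls shown in this hunk, not new driver code):

	/* Underruns on pipe A are expected while the port is bounced through
	 * transcoder A, so silence both CPU and PCH reporting first... */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);

	/* ...run the IBX transcoder-A dance from the lines above... */

	/* ...and re-enable reporting only after a vblank has passed. */
	intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A);
	intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);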
Line 3943... Line 3852...
3943
	}
3852
	}
-
 
3853
 
-
 
3854
	msleep(intel_dp->panel_power_down_delay);
3944
 
3855
 
Line 3945... Line 3856...
3945
	msleep(intel_dp->panel_power_down_delay);
3856
	intel_dp->DP = DP;
3946
}
3857
}
3947
 
3858
 
Line 3988... Line 3899...
3988
				dev_priv->psr.psr2_support ? "supported" : "not supported");
3899
				dev_priv->psr.psr2_support ? "supported" : "not supported");
3989
		}
3900
		}
3990
	}
3901
	}
Line 3991... Line 3902...
3991
 
3902
 
3992
	DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
3903
	DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
3993
		      yesno(intel_dp_source_supports_hbr2(dev)),
3904
		      yesno(intel_dp_source_supports_hbr2(intel_dp)),
Line 3994... Line 3905...
3994
		      yesno(drm_dp_tps3_supported(intel_dp->dpcd)));
3905
		      yesno(drm_dp_tps3_supported(intel_dp->dpcd)));
3995
 
3906
 
3996
	/* Intermediate frequency support */
3907
	/* Intermediate frequency support */
Line 4078... Line 3989...
4078
}
3989
}
Line 4079... Line 3990...
4079
 
3990
 
4080
static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
3991
static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
4081
{
3992
{
-
 
3993
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4082
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3994
	struct drm_device *dev = dig_port->base.base.dev;
4083
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
3995
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4084
	u8 buf;
3996
	u8 buf;
-
 
3997
	int ret = 0;
-
 
3998
	int count = 0;
Line 4085... Line 3999...
4085
	int ret = 0;
3999
	int attempts = 10;
4086
 
4000
 
4087
	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
4001
	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
4088
		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
4002
		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
Line 4095... Line 4009...
4095
		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
4009
		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
4096
		ret = -EIO;
4010
		ret = -EIO;
4097
		goto out;
4011
		goto out;
4098
	}
4012
	}
Line -... Line 4013...
-
 
4013
 
4099
 
4014
	do {
-
 
4015
		intel_wait_for_vblank(dev, intel_crtc->pipe);
-
 
4016
 
-
 
4017
		if (drm_dp_dpcd_readb(&intel_dp->aux,
-
 
4018
				      DP_TEST_SINK_MISC, &buf) < 0) {
-
 
4019
			ret = -EIO;
-
 
4020
			goto out;
-
 
4021
		}
-
 
4022
		count = buf & DP_TEST_COUNT_MASK;
-
 
4023
	} while (--attempts && count);
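The stop path drains the sink's per-frame CRC counter before returning; that polling loop on its own, as a hedged sketch (hypothetical helper, same DPCD fields):

static int wait_for_crc_count_to_clear(struct intel_dp *intel_dp,
				       struct drm_device *dev, enum pipe pipe)
{
	int attempts = 10;
	u8 buf;

	do {
		/* The TEST_CRC_COUNT field is updated once per frame. */
		intel_wait_for_vblank(dev, pipe);

		if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
			return -EIO;
	} while (--attempts && (buf & DP_TEST_COUNT_MASK));

	return attempts ? 0 : -ETIMEDOUT;
}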
-
 
4024
 
-
 
4025
	if (attempts == 0) {
-
 
4026
		DRM_ERROR("TIMEOUT: Sink CRC counter is not zeroed\n");
-
 
4027
		ret = -ETIMEDOUT;
-
 
4028
	}
4100
	intel_dp->sink_crc.started = false;
4029
 
4101
 out:
4030
 out:
4102
	hsw_enable_ips(intel_crtc);
4031
	hsw_enable_ips(intel_crtc);
4103
	return ret;
4032
	return ret;
Line 4104... Line 4033...
4104
}
4033
}
4105
 
4034
 
4106
static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
4035
static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
-
 
4036
{
4107
{
4037
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4108
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4038
	struct drm_device *dev = dig_port->base.base.dev;
4109
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4039
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
Line 4110... Line -...
4110
	u8 buf;
-
 
4111
	int ret;
-
 
4112
 
-
 
4113
	if (intel_dp->sink_crc.started) {
-
 
4114
		ret = intel_dp_sink_crc_stop(intel_dp);
-
 
4115
		if (ret)
-
 
4116
			return ret;
4040
	u8 buf;
4117
	}
4041
	int ret;
Line 4118... Line 4042...
4118
 
4042
 
4119
	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
4043
	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
Line 4120... Line -...
4120
		return -EIO;
-
 
4121
 
-
 
4122
	if (!(buf & DP_TEST_CRC_SUPPORTED))
4044
		return -EIO;
4123
		return -ENOTTY;
4045
 
Line -... Line 4046...
-
 
4046
	if (!(buf & DP_TEST_CRC_SUPPORTED))
-
 
4047
		return -ENOTTY;
-
 
4048
 
-
 
4049
	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
-
 
4050
		return -EIO;
-
 
4051
 
4124
 
4052
	if (buf & DP_TEST_SINK_START) {
Line 4125... Line 4053...
4125
	intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;
4053
		ret = intel_dp_sink_crc_stop(intel_dp);
4126
 
4054
		if (ret)
4127
	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
4055
			return ret;
4128
		return -EIO;
4056
	}
4129
 
4057
 
Line 4130... Line 4058...
4130
	hsw_disable_ips(intel_crtc);
4058
	hsw_disable_ips(intel_crtc);
4131
 
4059
 
4132
	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4060
	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
Line 4133... Line 4061...
4133
			       buf | DP_TEST_SINK_START) < 0) {
4061
			       buf | DP_TEST_SINK_START) < 0) {
4134
		hsw_enable_ips(intel_crtc);
4062
		hsw_enable_ips(intel_crtc);
Line 4145... Line 4073...
4145
	struct drm_device *dev = dig_port->base.base.dev;
4073
	struct drm_device *dev = dig_port->base.base.dev;
4146
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4074
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4147
	u8 buf;
4075
	u8 buf;
4148
	int count, ret;
4076
	int count, ret;
4149
	int attempts = 6;
4077
	int attempts = 6;
4150
	bool old_equal_new;
-
 
Line 4151... Line 4078...
4151
 
4078
 
4152
	ret = intel_dp_sink_crc_start(intel_dp);
4079
	ret = intel_dp_sink_crc_start(intel_dp);
4153
	if (ret)
4080
	if (ret)
Line 4161... Line 4088...
4161
			ret = -EIO;
4088
			ret = -EIO;
4162
			goto stop;
4089
			goto stop;
4163
		}
4090
		}
4164
		count = buf & DP_TEST_COUNT_MASK;
4091
		count = buf & DP_TEST_COUNT_MASK;
Line 4165... Line -...
4165
 
-
 
4166
		/*
-
 
4167
		 * Count might be reset during the loop. In this case
-
 
4168
		 * last known count needs to be reset as well.
-
 
4169
		 */
-
 
4170
		if (count == 0)
-
 
4171
			intel_dp->sink_crc.last_count = 0;
-
 
4172
 
-
 
4173
		if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
-
 
4174
			ret = -EIO;
-
 
4175
			goto stop;
-
 
4176
		}
-
 
4177
 
-
 
4178
		old_equal_new = (count == intel_dp->sink_crc.last_count &&
-
 
4179
				 !memcmp(intel_dp->sink_crc.last_crc, crc,
-
 
4180
					 6 * sizeof(u8)));
-
 
4181
 
4092
 
4182
	} while (--attempts && (count == 0 || old_equal_new));
-
 
4183
 
-
 
4184
	intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;
-
 
Line 4185... Line 4093...
4185
	memcpy(intel_dp->sink_crc.last_crc, crc, 6 * sizeof(u8));
4093
	} while (--attempts && count == 0);
4186
 
-
 
4187
	if (attempts == 0) {
-
 
4188
		if (old_equal_new) {
-
 
4189
			DRM_DEBUG_KMS("Unreliable Sink CRC counter: Current returned CRC is identical to the previous one\n");
4094
 
4190
		} else {
4095
	if (attempts == 0) {
4191
			DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
4096
			DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
4192
			ret = -ETIMEDOUT;
4097
			ret = -ETIMEDOUT;
-
 
4098
			goto stop;
-
 
4099
		}
-
 
4100
 
-
 
4101
	if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
4193
			goto stop;
4102
		ret = -EIO;
Line 4194... Line 4103...
4194
		}
4103
		goto stop;
4195
	}
4104
	}
4196
 
4105
 
Line 4289... Line 4198...
4289
{
4198
{
4290
	uint8_t response = DP_TEST_NAK;
4199
	uint8_t response = DP_TEST_NAK;
4291
	uint8_t rxdata = 0;
4200
	uint8_t rxdata = 0;
4292
	int status = 0;
4201
	int status = 0;
Line 4293... Line -...
4293
 
-
 
4294
	intel_dp->compliance_test_active = 0;
-
 
4295
	intel_dp->compliance_test_type = 0;
-
 
4296
	intel_dp->compliance_test_data = 0;
-
 
4297
 
-
 
4298
	intel_dp->aux.i2c_nack_count = 0;
-
 
4299
	intel_dp->aux.i2c_defer_count = 0;
-
 
4300
 
4202
 
4301
	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
4203
	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
4302
	if (status <= 0) {
4204
	if (status <= 0) {
4303
		DRM_DEBUG_KMS("Could not read test request from sink\n");
4205
		DRM_DEBUG_KMS("Could not read test request from sink\n");
4304
		goto update_status;
4206
		goto update_status;
Line 4411... Line 4313...
4411
	u8 sink_irq_vector;
4313
	u8 sink_irq_vector;
4412
	u8 link_status[DP_LINK_STATUS_SIZE];
4314
	u8 link_status[DP_LINK_STATUS_SIZE];
Line 4413... Line 4315...
4413
 
4315
 
Line -... Line 4316...
-
 
4316
	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
-
 
4317
 
-
 
4318
	/*
-
 
4319
	 * Clearing compliance test variables to allow capturing
-
 
4320
	 * of values for next automated test request.
-
 
4321
	 */
-
 
4322
	intel_dp->compliance_test_active = 0;
-
 
4323
	intel_dp->compliance_test_type = 0;
4414
	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4324
	intel_dp->compliance_test_data = 0;
4415
 
4325
 
Line 4416... Line 4326...
4416
	if (!intel_encoder->base.crtc)
4326
	if (!intel_encoder->base.crtc)
4417
		return;
4327
		return;
Line 4441... Line 4351...
4441
			DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
4351
			DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
4442
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4352
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4443
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4353
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4444
	}
4354
	}
Line -... Line 4355...
-
 
4355
 
-
 
4356
	/* if link training is requested we should perform it always */
4445
 
4357
	if ((intel_dp->compliance_test_type == DP_TEST_LINK_TRAINING) ||
4446
	if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
4358
		(!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count))) {
4447
		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
4359
		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
4448
			      intel_encoder->base.name);
4360
			      intel_encoder->base.name);
4449
		intel_dp_start_link_train(intel_dp);
4361
		intel_dp_start_link_train(intel_dp);
4450
		intel_dp_stop_link_train(intel_dp);
4362
		intel_dp_stop_link_train(intel_dp);
Line 4644... Line 4556...
4644
 * @dev_priv: i915 private structure
4556
 * @dev_priv: i915 private structure
4645
 * @port: the port to test
4557
 * @port: the port to test
4646
 *
4558
 *
4647
 * Return %true if @port is connected, %false otherwise.
4559
 * Return %true if @port is connected, %false otherwise.
4648
 */
4560
 */
4649
static bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
4561
bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
4650
					 struct intel_digital_port *port)
4562
					 struct intel_digital_port *port)
4651
{
4563
{
4652
	if (HAS_PCH_IBX(dev_priv))
4564
	if (HAS_PCH_IBX(dev_priv))
4653
		return ibx_digital_port_connected(dev_priv, port);
4565
		return ibx_digital_port_connected(dev_priv, port);
4654
	if (HAS_PCH_SPLIT(dev_priv))
4566
	if (HAS_PCH_SPLIT(dev_priv))
Line 4659... Line 4571...
4659
		return gm45_digital_port_connected(dev_priv, port);
4571
		return gm45_digital_port_connected(dev_priv, port);
4660
	else
4572
	else
4661
		return g4x_digital_port_connected(dev_priv, port);
4573
		return g4x_digital_port_connected(dev_priv, port);
4662
}
4574
}
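With the static dropped, the live-status helper above can be called directly from the detect path, which is exactly what the right-hand column does further down; a minimal usage fragment (variables as in intel_dp_detect):

	/* Pure hotplug/live-status query, no DPCD traffic needed. */
	if (intel_digital_port_connected(to_i915(dev), dp_to_dig_port(intel_dp)))
		status = intel_dp_detect_dpcd(intel_dp);
	else
		status = connector_status_disconnected;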
Line 4663... Line -...
4663
 
-
 
4664
static enum drm_connector_status
-
 
4665
ironlake_dp_detect(struct intel_dp *intel_dp)
-
 
4666
{
-
 
4667
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
-
 
4668
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
4669
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-
 
4670
 
-
 
4671
	if (!intel_digital_port_connected(dev_priv, intel_dig_port))
-
 
4672
		return connector_status_disconnected;
-
 
4673
 
-
 
4674
	return intel_dp_detect_dpcd(intel_dp);
-
 
4675
}
-
 
4676
 
-
 
4677
static enum drm_connector_status
-
 
4678
g4x_dp_detect(struct intel_dp *intel_dp)
-
 
4679
{
-
 
4680
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
-
 
4681
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-
 
4682
 
-
 
4683
	/* Can't disconnect eDP, but you can close the lid... */
-
 
4684
	if (is_edp(intel_dp)) {
-
 
4685
		enum drm_connector_status status;
-
 
4686
 
-
 
4687
		status = intel_panel_detect(dev);
-
 
4688
		if (status == connector_status_unknown)
-
 
4689
			status = connector_status_connected;
-
 
4690
		return status;
-
 
4691
	}
-
 
4692
 
-
 
4693
	if (!intel_digital_port_connected(dev->dev_private, intel_dig_port))
-
 
4694
		return connector_status_disconnected;
-
 
4695
 
-
 
4696
	return intel_dp_detect_dpcd(intel_dp);
-
 
4697
}
-
 
4698
 
4575
 
4699
static struct edid *
4576
static struct edid *
4700
intel_dp_get_edid(struct intel_dp *intel_dp)
4577
intel_dp_get_edid(struct intel_dp *intel_dp)
4701
{
4578
{
Line 4766... Line 4643...
4766
	intel_display_power_get(to_i915(dev), power_domain);
4643
	intel_display_power_get(to_i915(dev), power_domain);
Line 4767... Line 4644...
4767
 
4644
 
4768
	/* Can't disconnect eDP, but you can close the lid... */
4645
	/* Can't disconnect eDP, but you can close the lid... */
4769
	if (is_edp(intel_dp))
4646
	if (is_edp(intel_dp))
4770
		status = edp_detect(intel_dp);
4647
		status = edp_detect(intel_dp);
-
 
4648
	else if (intel_digital_port_connected(to_i915(dev),
4771
	else if (HAS_PCH_SPLIT(dev))
4649
					      dp_to_dig_port(intel_dp)))
4772
		status = ironlake_dp_detect(intel_dp);
4650
		status = intel_dp_detect_dpcd(intel_dp);
4773
	else
4651
	else
-
 
4652
		status = connector_status_disconnected;
4774
		status = g4x_dp_detect(intel_dp);
4653
 
-
 
4654
	if (status != connector_status_connected) {
-
 
4655
		intel_dp->compliance_test_active = 0;
-
 
4656
		intel_dp->compliance_test_type = 0;
-
 
4657
		intel_dp->compliance_test_data = 0;
4775
	if (status != connector_status_connected)
4658
 
-
 
4659
		goto out;
Line 4776... Line 4660...
4776
		goto out;
4660
	}
Line 4777... Line 4661...
4777
 
4661
 
4778
	intel_dp_probe_oui(intel_dp);
4662
	intel_dp_probe_oui(intel_dp);
Line 4785... Line 4669...
4785
			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4669
			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4786
		status = connector_status_disconnected;
4670
		status = connector_status_disconnected;
4787
		goto out;
4671
		goto out;
4788
	}
4672
	}
Line -... Line 4673...
-
 
4673
 
-
 
4674
	/*
-
 
4675
	 * Clearing NACK and defer counts to get their exact values
-
 
4676
	 * while reading EDID which are required by Compliance tests
-
 
4677
	 * 4.2.2.4 and 4.2.2.5
-
 
4678
	 */
-
 
4679
	intel_dp->aux.i2c_nack_count = 0;
-
 
4680
	intel_dp->aux.i2c_defer_count = 0;
4789
 
4681
 
Line 4790... Line 4682...
4790
	intel_dp_set_edid(intel_dp);
4682
	intel_dp_set_edid(intel_dp);
4791
 
4683
 
4792
	if (intel_encoder->type != INTEL_OUTPUT_EDP)
4684
	if (intel_encoder->type != INTEL_OUTPUT_EDP)
Line 4989... Line 4881...
4989
void intel_dp_encoder_destroy(struct drm_encoder *encoder)
4881
void intel_dp_encoder_destroy(struct drm_encoder *encoder)
4990
{
4882
{
4991
	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4883
	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4992
	struct intel_dp *intel_dp = &intel_dig_port->dp;
4884
	struct intel_dp *intel_dp = &intel_dig_port->dp;
Line 4993... Line 4885...
4993
 
4885
 
4994
	drm_dp_aux_unregister(&intel_dp->aux);
4886
	intel_dp_aux_fini(intel_dp);
4995
	intel_dp_mst_encoder_cleanup(intel_dig_port);
4887
	intel_dp_mst_encoder_cleanup(intel_dig_port);
4996
	if (is_edp(intel_dp)) {
4888
	if (is_edp(intel_dp)) {
4997
		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4889
//		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4998
		/*
4890
		/*
4999
		 * vdd might still be enabled due to the delayed vdd off.
4891
		 * vdd might still be enabled due to the delayed vdd off.
5000
		 * Make sure vdd is actually turned off here.
4892
		 * Make sure vdd is actually turned off here.
5001
		 */
4893
		 */
5002
		pps_lock(intel_dp);
4894
		pps_lock(intel_dp);
5003
		edp_panel_vdd_off_sync(intel_dp);
4895
		edp_panel_vdd_off_sync(intel_dp);
Line -... Line 4896...
-
 
4896
		pps_unlock(intel_dp);
-
 
4897
 
-
 
4898
		if (intel_dp->edp_notifier.notifier_call) {
5004
		pps_unlock(intel_dp);
4899
			intel_dp->edp_notifier.notifier_call = NULL;
5005
 
4900
		}
5006
	}
4901
	}
5007
	drm_encoder_cleanup(encoder);
4902
	drm_encoder_cleanup(encoder);
Line 5017... Line 4912...
5017
 
4912
 
5018
	/*
4913
	/*
5019
	 * vdd might still be enabled due to the delayed vdd off.
4914
	 * vdd might still be enabled due to the delayed vdd off.
5020
	 * Make sure vdd is actually turned off here.
4915
	 * Make sure vdd is actually turned off here.
5021
	 */
4916
	 */
5022
	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4917
//	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5023
	pps_lock(intel_dp);
4918
	pps_lock(intel_dp);
5024
	edp_panel_vdd_off_sync(intel_dp);
4919
	edp_panel_vdd_off_sync(intel_dp);
5025
	pps_unlock(intel_dp);
4920
	pps_unlock(intel_dp);
Line 5050... Line 4945...
5050
	edp_panel_vdd_schedule_off(intel_dp);
4945
	edp_panel_vdd_schedule_off(intel_dp);
5051
}
4946
}
Line 5052... Line 4947...
5052
 
4947
 
5053
void intel_dp_encoder_reset(struct drm_encoder *encoder)
4948
void intel_dp_encoder_reset(struct drm_encoder *encoder)
5054
{
-
 
5055
	struct drm_i915_private *dev_priv = to_i915(encoder->dev);
4949
{
5056
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-
 
5057
 
-
 
5058
	if (!HAS_DDI(dev_priv))
-
 
Line 5059... Line 4950...
5059
		intel_dp->DP = I915_READ(intel_dp->output_reg);
4950
	struct intel_dp *intel_dp;
5060
 
4951
 
Line -... Line 4952...
-
 
4952
	if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
-
 
4953
		return;
5061
	if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
4954
 
Line 5062... Line 4955...
5062
		return;
4955
	intel_dp = enc_to_intel_dp(encoder);
5063
 
4956
 
5064
	pps_lock(intel_dp);
4957
	pps_lock(intel_dp);
5065
 
4958
 
5066
	/*
4959
	/*
5067
	 * Read out the current power sequencer assignment,
4960
	 * Read out the current power sequencer assignment,
Line 5068... Line 4961...
5068
	 * in case the BIOS did something with it.
4961
	 * in case the BIOS did something with it.
Line 5069... Line 4962...
5069
	 */
4962
	 */
Line 5130... Line 5023...
5130
 
5023
 
5131
	power_domain = intel_display_port_aux_power_domain(intel_encoder);
5024
	power_domain = intel_display_port_aux_power_domain(intel_encoder);
Line 5132... Line 5025...
5132
	intel_display_power_get(dev_priv, power_domain);
5025
	intel_display_power_get(dev_priv, power_domain);
-
 
5026
 
-
 
5027
	if (long_hpd) {
-
 
5028
		/* indicate that we need to restart link training */
5133
 
5029
		intel_dp->train_set_valid = false;
5134
	if (long_hpd) {
5030
 
Line 5135... Line 5031...
5135
		if (!intel_digital_port_connected(dev_priv, intel_dig_port))
5031
		if (!intel_digital_port_connected(dev_priv, intel_dig_port))
5136
			goto mst_fail;
5032
			goto mst_fail;
Line 5174... Line 5070...
5174
	intel_display_power_put(dev_priv, power_domain);
5070
	intel_display_power_put(dev_priv, power_domain);
Line 5175... Line 5071...
5175
 
5071
 
5176
	return ret;
5072
	return ret;
Line 5177... Line -...
5177
}
-
 
5178
 
-
 
5179
/* Return which DP Port should be selected for Transcoder DP control */
-
 
5180
int
-
 
5181
intel_trans_dp_port_sel(struct drm_crtc *crtc)
-
 
5182
{
-
 
5183
	struct drm_device *dev = crtc->dev;
-
 
5184
	struct intel_encoder *intel_encoder;
-
 
5185
	struct intel_dp *intel_dp;
-
 
5186
 
-
 
5187
	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
-
 
5188
		intel_dp = enc_to_intel_dp(&intel_encoder->base);
-
 
5189
 
-
 
5190
		if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
-
 
5191
		    intel_encoder->type == INTEL_OUTPUT_EDP)
-
 
5192
			return intel_dp->output_reg;
-
 
5193
	}
-
 
5194
 
-
 
5195
	return -1;
-
 
5196
}
5073
}
5197
 
5074
 
5198
/* check the VBT to see whether the eDP is on another port */
5075
/* check the VBT to see whether the eDP is on another port */
5199
bool intel_dp_is_edp(struct drm_device *dev, enum port port)
5076
bool intel_dp_is_edp(struct drm_device *dev, enum port port)
5200
{
5077
{
Line 5264... Line 5141...
5264
{
5141
{
5265
	struct drm_i915_private *dev_priv = dev->dev_private;
5142
	struct drm_i915_private *dev_priv = dev->dev_private;
5266
	struct edp_power_seq cur, vbt, spec,
5143
	struct edp_power_seq cur, vbt, spec,
5267
		*final = &intel_dp->pps_delays;
5144
		*final = &intel_dp->pps_delays;
5268
	u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
5145
	u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
5269
	int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg = 0;
5146
	i915_reg_t pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
Line 5270... Line 5147...
5270
 
5147
 
Line 5271... Line 5148...
5271
	lockdep_assert_held(&dev_priv->pps_mutex);
5148
	lockdep_assert_held(&dev_priv->pps_mutex);
5272
 
5149
 
Line 5386... Line 5263...
5386
					      struct intel_dp *intel_dp)
5263
					      struct intel_dp *intel_dp)
5387
{
5264
{
5388
	struct drm_i915_private *dev_priv = dev->dev_private;
5265
	struct drm_i915_private *dev_priv = dev->dev_private;
5389
	u32 pp_on, pp_off, pp_div, port_sel = 0;
5266
	u32 pp_on, pp_off, pp_div, port_sel = 0;
5390
	int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
5267
	int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
5391
	int pp_on_reg, pp_off_reg, pp_div_reg = 0, pp_ctrl_reg;
5268
	i915_reg_t pp_on_reg, pp_off_reg, pp_div_reg, pp_ctrl_reg;
5392
	enum port port = dp_to_dig_port(intel_dp)->port;
5269
	enum port port = dp_to_dig_port(intel_dp)->port;
5393
	const struct edp_power_seq *seq = &intel_dp->pps_delays;
5270
	const struct edp_power_seq *seq = &intel_dp->pps_delays;
Line 5394... Line 5271...
5394
 
5271
 
Line 5441... Line 5318...
5441
				<< PANEL_POWER_CYCLE_DELAY_SHIFT);
5318
				<< PANEL_POWER_CYCLE_DELAY_SHIFT);
5442
	}
5319
	}
Line 5443... Line 5320...
5443
 
5320
 
5444
	/* Haswell doesn't have any port selection bits for the panel
5321
	/* Haswell doesn't have any port selection bits for the panel
5445
	 * power sequencer any more. */
5322
	 * power sequencer any more. */
5446
	if (IS_VALLEYVIEW(dev)) {
5323
	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
5447
		port_sel = PANEL_PORT_SELECT_VLV(port);
5324
		port_sel = PANEL_PORT_SELECT_VLV(port);
5448
	} else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
5325
	} else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
5449
		if (port == PORT_A)
5326
		if (port == PORT_A)
5450
			port_sel = PANEL_PORT_SELECT_DPA;
5327
			port_sel = PANEL_PORT_SELECT_DPA;
Line 5548... Line 5425...
5548
		case DRRS_MAX_RR:
5425
		case DRRS_MAX_RR:
5549
		default:
5426
		default:
5550
			DRM_ERROR("Unsupported refreshrate type\n");
5427
			DRM_ERROR("Unsupported refreshrate type\n");
5551
		}
5428
		}
5552
	} else if (INTEL_INFO(dev)->gen > 6) {
5429
	} else if (INTEL_INFO(dev)->gen > 6) {
5553
		u32 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
5430
		i915_reg_t reg = PIPECONF(intel_crtc->config->cpu_transcoder);
5554
		u32 val;
5431
		u32 val;
Line 5555... Line 5432...
5555
 
5432
 
5556
		val = I915_READ(reg);
5433
		val = I915_READ(reg);
5557
		if (index > DRRS_HIGH_RR) {
5434
		if (index > DRRS_HIGH_RR) {
5558
			if (IS_VALLEYVIEW(dev))
5435
			if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
5559
				val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5436
				val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5560
			else
5437
			else
5561
				val |= PIPECONF_EDP_RR_MODE_SWITCH;
5438
				val |= PIPECONF_EDP_RR_MODE_SWITCH;
5562
		} else {
5439
		} else {
5563
			if (IS_VALLEYVIEW(dev))
5440
			if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
5564
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5441
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5565
			else
5442
			else
5566
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
5443
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
5567
		}
5444
		}
Line 5634... Line 5511...
5634
			fixed_mode->vrefresh);
5511
			fixed_mode->vrefresh);
Line 5635... Line 5512...
5635
 
5512
 
5636
	dev_priv->drrs.dp = NULL;
5513
	dev_priv->drrs.dp = NULL;
Line 5637... Line 5514...
5637
	mutex_unlock(&dev_priv->drrs.mutex);
5514
	mutex_unlock(&dev_priv->drrs.mutex);
5638
 
5515
 
Line 5639... Line 5516...
5639
	cancel_delayed_work_sync(&dev_priv->drrs.work);
5516
//	cancel_delayed_work_sync(&dev_priv->drrs.work);
5640
}
5517
}
5641
 
5518
 
Line 5687... Line 5564...
5687
	enum pipe pipe;
5564
	enum pipe pipe;
Line 5688... Line 5565...
5688
 
5565
 
5689
	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5566
	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
Line 5690... Line 5567...
5690
		return;
5567
		return;
Line 5691... Line 5568...
5691
 
5568
 
5692
	cancel_delayed_work(&dev_priv->drrs.work);
5569
//	cancel_delayed_work(&dev_priv->drrs.work);
5693
 
5570
 
5694
	mutex_lock(&dev_priv->drrs.mutex);
5571
	mutex_lock(&dev_priv->drrs.mutex);
Line 5732... Line 5609...
5732
	enum pipe pipe;
5609
	enum pipe pipe;
Line 5733... Line 5610...
5733
 
5610
 
5734
	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5611
	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
Line 5735... Line 5612...
5735
		return;
5612
		return;
Line 5736... Line 5613...
5736
 
5613
 
5737
	cancel_delayed_work(&dev_priv->drrs.work);
5614
//	cancel_delayed_work(&dev_priv->drrs.work);
5738
 
5615
 
5739
	mutex_lock(&dev_priv->drrs.mutex);
5616
	mutex_lock(&dev_priv->drrs.mutex);
Line 5925... Line 5802...
5925
		if (fixed_mode)
5802
		if (fixed_mode)
5926
			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5803
			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5927
	}
5804
	}
5928
	mutex_unlock(&dev->mode_config.mutex);
5805
	mutex_unlock(&dev->mode_config.mutex);
Line 5929... Line 5806...
5929
 
5806
 
-
 
5807
	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
-
 
5808
//		intel_dp->edp_notifier.notifier_call = edp_notify_handler;
Line 5930... Line 5809...
5930
	if (IS_VALLEYVIEW(dev)) {
5809
//		register_reboot_notifier(&intel_dp->edp_notifier);
5931
 
5810
 
5932
		/*
5811
		/*
5933
		 * Figure out the current pipe for the initial backlight setup.
5812
		 * Figure out the current pipe for the initial backlight setup.
Line 5964... Line 5843...
5964
	struct intel_dp *intel_dp = &intel_dig_port->dp;
5843
	struct intel_dp *intel_dp = &intel_dig_port->dp;
5965
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
5844
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
5966
	struct drm_device *dev = intel_encoder->base.dev;
5845
	struct drm_device *dev = intel_encoder->base.dev;
5967
	struct drm_i915_private *dev_priv = dev->dev_private;
5846
	struct drm_i915_private *dev_priv = dev->dev_private;
5968
	enum port port = intel_dig_port->port;
5847
	enum port port = intel_dig_port->port;
5969
	int type;
5848
	int type, ret;
Line 5970... Line 5849...
5970
 
5849
 
Line 5971... Line 5850...
5971
	intel_dp->pps_pipe = INVALID_PIPE;
5850
	intel_dp->pps_pipe = INVALID_PIPE;
5972
 
5851
 
5973
	/* intel_dp vfuncs */
5852
	/* intel_dp vfuncs */
5974
	if (INTEL_INFO(dev)->gen >= 9)
5853
	if (INTEL_INFO(dev)->gen >= 9)
5975
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
5854
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
5976
	else if (IS_VALLEYVIEW(dev))
5855
	else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
5977
		intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
5856
		intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
5978
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
5857
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
5979
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
5858
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
Line 5985... Line 5864...
5985
	if (INTEL_INFO(dev)->gen >= 9)
5864
	if (INTEL_INFO(dev)->gen >= 9)
5986
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5865
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5987
	else
5866
	else
5988
		intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
5867
		intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
Line -... Line 5868...
-
 
5868
 
-
 
5869
	if (HAS_DDI(dev))
-
 
5870
		intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain;
5989
 
5871
 
5990
	/* Preserve the current hw state. */
5872
	/* Preserve the current hw state. */
5991
	intel_dp->DP = I915_READ(intel_dp->output_reg);
5873
	intel_dp->DP = I915_READ(intel_dp->output_reg);
Line 5992... Line 5874...
5992
	intel_dp->attached_connector = intel_connector;
5874
	intel_dp->attached_connector = intel_connector;
Line 6003... Line 5885...
6003
	 */
5885
	 */
6004
	if (type == DRM_MODE_CONNECTOR_eDP)
5886
	if (type == DRM_MODE_CONNECTOR_eDP)
6005
		intel_encoder->type = INTEL_OUTPUT_EDP;
5887
		intel_encoder->type = INTEL_OUTPUT_EDP;
Line 6006... Line 5888...
6006
 
5888
 
6007
	/* eDP only on port B and/or C on vlv/chv */
5889
	/* eDP only on port B and/or C on vlv/chv */
6008
	if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
5890
	if (WARN_ON((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
6009
		    port != PORT_B && port != PORT_C))
5891
		    is_edp(intel_dp) && port != PORT_B && port != PORT_C))
Line 6010... Line 5892...
6010
		return false;
5892
		return false;
6011
 
5893
 
6012
	DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5894
	DRM_DEBUG_KMS("Adding %s connector on port %c\n",
Line 6036... Line 5918...
6036
	case PORT_A:
5918
	case PORT_A:
6037
		intel_encoder->hpd_pin = HPD_PORT_A;
5919
		intel_encoder->hpd_pin = HPD_PORT_A;
6038
		break;
5920
		break;
6039
	case PORT_B:
5921
	case PORT_B:
6040
		intel_encoder->hpd_pin = HPD_PORT_B;
5922
		intel_encoder->hpd_pin = HPD_PORT_B;
6041
		if (IS_BROXTON(dev_priv) && (INTEL_REVID(dev) < BXT_REVID_B0))
5923
		if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
6042
			intel_encoder->hpd_pin = HPD_PORT_A;
5924
			intel_encoder->hpd_pin = HPD_PORT_A;
6043
		break;
5925
		break;
6044
	case PORT_C:
5926
	case PORT_C:
6045
		intel_encoder->hpd_pin = HPD_PORT_C;
5927
		intel_encoder->hpd_pin = HPD_PORT_C;
6046
		break;
5928
		break;
Line 6055... Line 5937...
6055
	}
5937
	}
Line 6056... Line 5938...
6056
 
5938
 
6057
	if (is_edp(intel_dp)) {
5939
	if (is_edp(intel_dp)) {
6058
		pps_lock(intel_dp);
5940
		pps_lock(intel_dp);
6059
		intel_dp_init_panel_power_timestamps(intel_dp);
5941
		intel_dp_init_panel_power_timestamps(intel_dp);
6060
		if (IS_VALLEYVIEW(dev))
5942
		if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
6061
			vlv_initial_power_sequencer_setup(intel_dp);
5943
			vlv_initial_power_sequencer_setup(intel_dp);
6062
		else
5944
		else
6063
			intel_dp_init_panel_power_sequencer(dev, intel_dp);
5945
			intel_dp_init_panel_power_sequencer(dev, intel_dp);
6064
		pps_unlock(intel_dp);
5946
		pps_unlock(intel_dp);
Line 6065... Line 5947...
6065
	}
5947
	}
-
 
5948
 
-
 
5949
	ret = intel_dp_aux_init(intel_dp, intel_connector);
Line 6066... Line 5950...
6066
 
5950
	if (ret)
6067
	intel_dp_aux_init(intel_dp, intel_connector);
5951
		goto fail;
6068
 
5952
 
6069
	/* init MST on ports that can support it */
5953
	/* init MST on ports that can support it */
6070
	if (HAS_DP_MST(dev) &&
5954
	if (HAS_DP_MST(dev) &&
Line 6071... Line 5955...
6071
	    (port == PORT_B || port == PORT_C || port == PORT_D))
5955
	    (port == PORT_B || port == PORT_C || port == PORT_D))
6072
		intel_dp_mst_encoder_init(intel_dig_port,
-
 
6073
					  intel_connector->base.base.id);
-
 
6074
 
-
 
6075
	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
-
 
6076
		drm_dp_aux_unregister(&intel_dp->aux);
-
 
6077
		if (is_edp(intel_dp)) {
-
 
6078
			cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
-
 
6079
			/*
-
 
6080
			 * vdd might still be enabled due to the delayed vdd off.
5956
		intel_dp_mst_encoder_init(intel_dig_port,
6081
			 * Make sure vdd is actually turned off here.
-
 
6082
			 */
-
 
6083
			pps_lock(intel_dp);
-
 
6084
			edp_panel_vdd_off_sync(intel_dp);
5957
					  intel_connector->base.base.id);
6085
			pps_unlock(intel_dp);
5958
 
6086
		}
5959
	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
Line 6087... Line 5960...
6087
		drm_connector_unregister(connector);
5960
		intel_dp_aux_fini(intel_dp);
Line 6088... Line 5961...
6088
		drm_connector_cleanup(connector);
5961
		intel_dp_mst_encoder_cleanup(intel_dig_port);
Line 6101... Line 5974...
6101
	}
5974
	}
Line 6102... Line 5975...
6102
 
5975
 
Line 6103... Line 5976...
6103
	i915_debugfs_connector_add(connector);
5976
	i915_debugfs_connector_add(connector);
-
 
5977
 
-
 
5978
	return true;
-
 
5979
 
-
 
5980
fail:
-
 
5981
	if (is_edp(intel_dp)) {
-
 
5982
//		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
-
 
5983
		/*
-
 
5984
		 * vdd might still be enabled due to the delayed vdd off.
-
 
5985
		 * Make sure vdd is actually turned off here.
-
 
5986
		 */
-
 
5987
		pps_lock(intel_dp);
-
 
5988
		edp_panel_vdd_off_sync(intel_dp);
-
 
5989
		pps_unlock(intel_dp);
-
 
5990
}
-
 
5991
	drm_connector_unregister(connector);
-
 
5992
	drm_connector_cleanup(connector);
6104
 
5993
 
Line -... Line 5994...
-
 
5994
	return false;
6105
	return true;
5995
}
6106
}
-
 
6107
 
5996
 
6108
bool intel_dp_init(struct drm_device *dev,
5997
void
6109
		   int output_reg,
5998
intel_dp_init(struct drm_device *dev,
6110
		   enum port port)
5999
	      i915_reg_t output_reg, enum port port)
6111
{
6000
{
6112
	struct drm_i915_private *dev_priv = dev->dev_private;
6001
	struct drm_i915_private *dev_priv = dev->dev_private;
6113
	struct intel_digital_port *intel_dig_port;
6002
	struct intel_digital_port *intel_dig_port;
Line 6114... Line 6003...
6114
	struct intel_encoder *intel_encoder;
6003
	struct intel_encoder *intel_encoder;
6115
	struct drm_encoder *encoder;
6004
	struct drm_encoder *encoder;
6116
	struct intel_connector *intel_connector;
6005
	struct intel_connector *intel_connector;
Line 6117... Line 6006...
6117
 
6006
 
6118
	intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
6007
	intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
6119
	if (!intel_dig_port)
6008
	if (!intel_dig_port)
Line 6120... Line 6009...
6120
		return false;
6009
		return;
6121
 
6010
 
Line 6122... Line 6011...
6122
	intel_connector = intel_connector_alloc();
6011
	intel_connector = intel_connector_alloc();
6123
	if (!intel_connector)
6012
	if (!intel_connector)
-
 
6013
		goto err_connector_alloc;
Line 6124... Line 6014...
6124
		goto err_connector_alloc;
6014
 
6125
 
6015
	intel_encoder = &intel_dig_port->base;
6126
	intel_encoder = &intel_dig_port->base;
6016
	encoder = &intel_encoder->base;
6127
	encoder = &intel_encoder->base;
6017
 
Line 6170... Line 6060...
6170
	dev_priv->hotplug.irq_port[port] = intel_dig_port;
6060
	dev_priv->hotplug.irq_port[port] = intel_dig_port;
Line 6171... Line 6061...
6171
 
6061
 
6172
	if (!intel_dp_init_connector(intel_dig_port, intel_connector))
6062
	if (!intel_dp_init_connector(intel_dig_port, intel_connector))
Line 6173... Line 6063...
6173
		goto err_init_connector;
6063
		goto err_init_connector;
Line 6174... Line 6064...
6174
 
6064
 
6175
	return true;
6065
	return;
-
 
6066
 
6176
 
6067
err_init_connector:
6177
err_init_connector:
6068
	drm_encoder_cleanup(encoder);
6178
	drm_encoder_cleanup(encoder);
6069
err_encoder_init:
-
 
6070
	kfree(intel_connector);
6179
	kfree(intel_connector);
6071
err_connector_alloc:
6180
err_connector_alloc:
6072
	kfree(intel_dig_port);
Line 6181... Line 6073...
6181
	kfree(intel_dig_port);
6073
 
6182
	return false;
6074
	return;
6183
}
6075
}