Subversion Repositories Kolibri OS


Diff: Rev 5078 (old) → Rev 5271 (new). Lines removed in Rev 5271 are prefixed with "-", lines added are prefixed with "+", unchanged context with a space.
Line 22... Line 22...
  */
 
 #include 
 #include "drmP.h"
 #include "radeon.h"
+#include "radeon_asic.h"
 #include "radeon_ucode.h"
 #include "cikd.h"
 #include "r600_dpm.h"
 #include "ci_dpm.h"
Line 43... Line 44...
 #define VOLTAGE_VID_OFFSET_SCALE2    100
 
 static const struct ci_pt_defaults defaults_hawaii_xt =
 {
 	1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
-	{ 0x84,  0x0,   0x0,   0x7F,  0x0,   0x0,   0x5A,  0x60,  0x51,  0x8E,  0x79,  0x6B,  0x5F,  0x90,  0x79  },
-	{ 0x1EA, 0x1EA, 0x1EA, 0x224, 0x224, 0x224, 0x24F, 0x24F, 0x24F, 0x28E, 0x28E, 0x28E, 0x2BC, 0x2BC, 0x2BC }
+	{ 0x2E,  0x00,  0x00,  0x88,  0x00,  0x00,  0x72,  0x60,  0x51,  0xA7,  0x79,  0x6B,  0x90,  0xBD,  0x79  },
+	{ 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
 };
 
 static const struct ci_pt_defaults defaults_hawaii_pro =
 {
 	1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
-	{ 0x93,  0x0,   0x0,   0x97,  0x0,   0x0,   0x6B,  0x60,  0x51,  0x95,  0x79,  0x6B,  0x5F,  0x90,  0x79  },
-	{ 0x1EA, 0x1EA, 0x1EA, 0x224, 0x224, 0x224, 0x24F, 0x24F, 0x24F, 0x28E, 0x28E, 0x28E, 0x2BC, 0x2BC, 0x2BC }
+	{ 0x2E,  0x00,  0x00,  0x88,  0x00,  0x00,  0x72,  0x60,  0x51,  0xA7,  0x79,  0x6B,  0x90,  0xBD,  0x79  },
+	{ 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
 };
 
Line 160... Line 161...
 	{ 0x60, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
 	{ 0xFFFFFFFF }
 };
Line 163... Line 164...
 
 extern u8 rv770_get_memory_module_index(struct radeon_device *rdev);
-extern void btc_get_max_clock_from_voltage_dependency_table(struct radeon_clock_voltage_dependency_table *table,
-							    u32 *max_clock);
 extern int ni_copy_and_switch_arb_sets(struct radeon_device *rdev,
 				       u32 arb_freq_src, u32 arb_freq_dest);
 extern u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock);
 extern u8 si_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode);
Line 183... Line 182...
 static int ci_set_power_limit(struct radeon_device *rdev, u32 n);
 static int ci_set_overdrive_target_tdp(struct radeon_device *rdev,
 				       u32 target_tdp);
 static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate);
+
+static PPSMC_Result ci_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
+						      PPSMC_Msg msg, u32 parameter);
 
 static struct ci_power_info *ci_get_pi(struct radeon_device *rdev)
 {
Line 190... Line 192...
         struct ci_power_info *pi = rdev->pm.dpm.priv;
Line 248... Line 250...
 	pi->caps_td_ramping = false;
 	pi->caps_tcp_ramping = false;
 
 	if (pi->caps_power_containment) {
 		pi->caps_cac = true;
+		if (rdev->family == CHIP_HAWAII)
+			pi->enable_bapm_feature = false;
+		else
 		pi->enable_bapm_feature = true;
 		pi->enable_tdc_limit_feature = true;
 		pi->enable_pkg_pwr_tracking_feature = true;
 	}
Line 351... Line 356...
 	pi->smc_powertune_table.TdcWaterfallCtl = pt_defaults->tdc_waterfall_ctl;
 
 	return 0;
 }
+
+static int ci_populate_fuzzy_fan(struct radeon_device *rdev)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+
+	if ((rdev->pm.dpm.fan.fan_output_sensitivity & (1 << 15)) ||
+	    (rdev->pm.dpm.fan.fan_output_sensitivity == 0))
+		rdev->pm.dpm.fan.fan_output_sensitivity =
+			rdev->pm.dpm.fan.default_fan_output_sensitivity;
+
+	pi->smc_powertune_table.FuzzyFan_PwmSetDelta =
+		cpu_to_be16(rdev->pm.dpm.fan.fan_output_sensitivity);
+
+	return 0;
+}
 
 static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct radeon_device *rdev)
 {
 	struct ci_power_info *pi = ci_get_pi(rdev);
Line 476... Line 496...
 		if (ret)
 			return ret;
 		ret = ci_populate_dw8(rdev);
 		if (ret)
 			return ret;
+		ret = ci_populate_fuzzy_fan(rdev);
+		if (ret)
+			return ret;
 		ret = ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(rdev);
 		if (ret)
 			return ret;
 		ret = ci_populate_bapm_vddc_base_leakage_sidd(rdev);
 		if (ret)
Line 689... Line 712...
 	}
 
 	return ret;
 }
+
+static int ci_enable_thermal_based_sclk_dpm(struct radeon_device *rdev,
+					    bool enable)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	PPSMC_Result smc_result = PPSMC_Result_OK;
+
+	if (pi->thermal_sclk_dpm_enabled) {
+		if (enable)
+			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_ENABLE_THERMAL_DPM);
+		else
+			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DISABLE_THERMAL_DPM);
+	}
+
+	if (smc_result == PPSMC_Result_OK)
+		return 0;
+	else
+		return -EINVAL;
+}
 
 static int ci_power_control_set_level(struct radeon_device *rdev)
 {
 	struct ci_power_info *pi = ci_get_pi(rdev);
 	struct radeon_cac_tdp_table *cac_tdp_table =
 		rdev->pm.dpm.dyn_state.cac_tdp_table;
 	s32 adjust_percent;
 	s32 target_tdp;
Line 701... Line 743...
 	int ret = 0;
 	bool adjust_polarity = false; /* ??? */
 
-	if (pi->caps_power_containment &&
-	    (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)) {
+	if (pi->caps_power_containment) {
 		adjust_percent = adjust_polarity ?
 			rdev->pm.dpm.tdp_adjustment : (-1 * rdev->pm.dpm.tdp_adjustment);
 		target_tdp = ((100 + adjust_percent) *
 			      (s32)cac_tdp_table->configurable_tdp) / 100;
 		target_tdp *= 256;
Line 746... Line 786...
 	struct ci_ps *ps = ci_get_ps(rps);
 	struct ci_power_info *pi = ci_get_pi(rdev);
 	struct radeon_clock_and_voltage_limits *max_limits;
 	bool disable_mclk_switching;
 	u32 sclk, mclk;
-	u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
 	int i;
 
 	if (rps->vce_active) {
 		rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;
Line 782... Line 821...
 			if (ps->performance_levels[i].sclk > max_limits->sclk)
 				ps->performance_levels[i].sclk = max_limits->sclk;
 		}
 	}
Line 786... Line -...
-
-	/* limit clocks to max supported clocks based on voltage dependency tables */
-	btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
-							&max_sclk_vddc);
-	btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
-							&max_mclk_vddci);
-	btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
-							&max_mclk_vddc);
-
-	for (i = 0; i < ps->performance_level_count; i++) {
-		if (max_sclk_vddc) {
-			if (ps->performance_levels[i].sclk > max_sclk_vddc)
-				ps->performance_levels[i].sclk = max_sclk_vddc;
-		}
-		if (max_mclk_vddci) {
-			if (ps->performance_levels[i].mclk > max_mclk_vddci)
-				ps->performance_levels[i].mclk = max_mclk_vddci;
-		}
-		if (max_mclk_vddc) {
-			if (ps->performance_levels[i].mclk > max_mclk_vddc)
-				ps->performance_levels[i].mclk = max_mclk_vddc;
-		}
-	}
 
Line 810... Line 826...
 	/* XXX validate the min clocks required for display */
 
 	if (disable_mclk_switching) {
Line 837... Line 853...
 		if (ps->performance_levels[1].mclk < ps->performance_levels[0].mclk)
 			ps->performance_levels[1].mclk = ps->performance_levels[0].mclk;
 	}
 }
Line 841... Line 857...
 
-static int ci_set_thermal_temperature_range(struct radeon_device *rdev,
+static int ci_thermal_set_temperature_range(struct radeon_device *rdev,
 					    int min_temp, int max_temp)
 {
 	int low_temp = 0 * 1000;
 	int high_temp = 255 * 1000;
Line 873... Line 889...
 	rdev->pm.dpm.thermal.max_temp = high_temp;
 
 	return 0;
Line -... Line 892...
-
 
892
}
-
 
893
 
-
 
894
static int ci_thermal_enable_alert(struct radeon_device *rdev,
-
 
895
				   bool enable)
-
 
896
{
-
 
897
	u32 thermal_int = RREG32_SMC(CG_THERMAL_INT);
-
 
898
	PPSMC_Result result;
-
 
899
 
-
 
900
	if (enable) {
-
 
901
		thermal_int &= ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
-
 
902
		WREG32_SMC(CG_THERMAL_INT, thermal_int);
-
 
903
		rdev->irq.dpm_thermal = false;
-
 
904
		result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Thermal_Cntl_Enable);
-
 
905
		if (result != PPSMC_Result_OK) {
-
 
906
			DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
-
 
907
			return -EINVAL;
-
 
908
		}
-
 
909
	} else {
-
 
910
		thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
-
 
911
		WREG32_SMC(CG_THERMAL_INT, thermal_int);
-
 
912
		rdev->irq.dpm_thermal = true;
-
 
913
		result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Thermal_Cntl_Disable);
-
 
914
		if (result != PPSMC_Result_OK) {
-
 
915
			DRM_DEBUG_KMS("Could not disable thermal interrupts.\n");
-
 
916
			return -EINVAL;
-
 
917
		}
-
 
918
	}
-
 
919
 
-
 
920
	return 0;
-
 
921
}
-
 
922
 
-
 
923
static void ci_fan_ctrl_set_static_mode(struct radeon_device *rdev, u32 mode)
-
 
924
{
-
 
925
	struct ci_power_info *pi = ci_get_pi(rdev);
-
 
926
	u32 tmp;
-
 
927
 
-
 
928
	if (pi->fan_ctrl_is_in_default_mode) {
-
 
929
		tmp = (RREG32_SMC(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK) >> FDO_PWM_MODE_SHIFT;
-
 
930
		pi->fan_ctrl_default_mode = tmp;
-
 
931
		tmp = (RREG32_SMC(CG_FDO_CTRL2) & TMIN_MASK) >> TMIN_SHIFT;
-
 
932
		pi->t_min = tmp;
-
 
933
		pi->fan_ctrl_is_in_default_mode = false;
-
 
934
	}
-
 
935
 
-
 
936
	tmp = RREG32_SMC(CG_FDO_CTRL2) & ~TMIN_MASK;
-
 
937
	tmp |= TMIN(0);
-
 
938
	WREG32_SMC(CG_FDO_CTRL2, tmp);
-
 
939
 
-
 
940
	tmp = RREG32_SMC(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
-
 
941
	tmp |= FDO_PWM_MODE(mode);
-
 
942
	WREG32_SMC(CG_FDO_CTRL2, tmp);
-
 
943
}
-
 
944
 
-
 
945
static int ci_thermal_setup_fan_table(struct radeon_device *rdev)
-
 
946
{
-
 
947
	struct ci_power_info *pi = ci_get_pi(rdev);
-
 
948
	SMU7_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
-
 
949
	u32 duty100;
-
 
950
	u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2;
-
 
951
	u16 fdo_min, slope1, slope2;
-
 
952
	u32 reference_clock, tmp;
-
 
953
	int ret;
-
 
954
	u64 tmp64;
-
 
955
 
-
 
956
	if (!pi->fan_table_start) {
-
 
957
		rdev->pm.dpm.fan.ucode_fan_control = false;
-
 
958
		return 0;
-
 
959
	}
-
 
960
 
-
 
961
	duty100 = (RREG32_SMC(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
-
 
962
 
-
 
963
	if (duty100 == 0) {
-
 
964
		rdev->pm.dpm.fan.ucode_fan_control = false;
-
 
965
		return 0;
-
 
966
	}
-
 
967
 
-
 
968
	tmp64 = (u64)rdev->pm.dpm.fan.pwm_min * duty100;
-
 
969
	do_div(tmp64, 10000);
-
 
970
	fdo_min = (u16)tmp64;
-
 
971
 
-
 
972
	t_diff1 = rdev->pm.dpm.fan.t_med - rdev->pm.dpm.fan.t_min;
-
 
973
	t_diff2 = rdev->pm.dpm.fan.t_high - rdev->pm.dpm.fan.t_med;
-
 
974
 
-
 
975
	pwm_diff1 = rdev->pm.dpm.fan.pwm_med - rdev->pm.dpm.fan.pwm_min;
-
 
976
	pwm_diff2 = rdev->pm.dpm.fan.pwm_high - rdev->pm.dpm.fan.pwm_med;
-
 
977
 
-
 
978
	slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
-
 
979
	slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
-
 
980
 
-
 
981
	fan_table.TempMin = cpu_to_be16((50 + rdev->pm.dpm.fan.t_min) / 100);
-
 
982
	fan_table.TempMed = cpu_to_be16((50 + rdev->pm.dpm.fan.t_med) / 100);
-
 
983
	fan_table.TempMax = cpu_to_be16((50 + rdev->pm.dpm.fan.t_max) / 100);
-
 
984
 
-
 
985
	fan_table.Slope1 = cpu_to_be16(slope1);
-
 
986
	fan_table.Slope2 = cpu_to_be16(slope2);
-
 
987
 
-
 
988
	fan_table.FdoMin = cpu_to_be16(fdo_min);
-
 
989
 
-
 
990
	fan_table.HystDown = cpu_to_be16(rdev->pm.dpm.fan.t_hyst);
-
 
991
 
-
 
992
	fan_table.HystUp = cpu_to_be16(1);
-
 
993
 
-
 
994
	fan_table.HystSlope = cpu_to_be16(1);
-
 
995
 
-
 
996
	fan_table.TempRespLim = cpu_to_be16(5);
-
 
997
 
-
 
998
	reference_clock = radeon_get_xclk(rdev);
-
 
999
 
-
 
1000
	fan_table.RefreshPeriod = cpu_to_be32((rdev->pm.dpm.fan.cycle_delay *
-
 
1001
					       reference_clock) / 1600);
-
 
1002
 
-
 
1003
	fan_table.FdoMax = cpu_to_be16((u16)duty100);
-
 
1004
 
-
 
1005
	tmp = (RREG32_SMC(CG_MULT_THERMAL_CTRL) & TEMP_SEL_MASK) >> TEMP_SEL_SHIFT;
-
 
1006
	fan_table.TempSrc = (uint8_t)tmp;
-
 
1007
 
-
 
1008
	ret = ci_copy_bytes_to_smc(rdev,
-
 
1009
				   pi->fan_table_start,
-
 
1010
				   (u8 *)(&fan_table),
-
 
1011
				   sizeof(fan_table),
-
 
1012
				   pi->sram_end);
-
 
1013
 
-
 
1014
	if (ret) {
-
 
1015
		DRM_ERROR("Failed to load fan table to the SMC.");
-
 
1016
		rdev->pm.dpm.fan.ucode_fan_control = false;
-
 
1017
	}
-
 
1018
 
-
 
1019
	return 0;
-
 
1020
}
-
 
1021
 
-
 
1022
static int ci_fan_ctrl_start_smc_fan_control(struct radeon_device *rdev)
-
 
1023
{
-
 
1024
	struct ci_power_info *pi = ci_get_pi(rdev);
-
 
1025
	PPSMC_Result ret;
-
 
1026
 
-
 
1027
	if (pi->caps_od_fuzzy_fan_control_support) {
-
 
1028
		ret = ci_send_msg_to_smc_with_parameter(rdev,
-
 
1029
							PPSMC_StartFanControl,
-
 
1030
							FAN_CONTROL_FUZZY);
-
 
1031
		if (ret != PPSMC_Result_OK)
-
 
1032
			return -EINVAL;
-
 
1033
		ret = ci_send_msg_to_smc_with_parameter(rdev,
-
 
1034
							PPSMC_MSG_SetFanPwmMax,
-
 
1035
							rdev->pm.dpm.fan.default_max_fan_pwm);
-
 
1036
		if (ret != PPSMC_Result_OK)
-
 
1037
			return -EINVAL;
-
 
1038
	} else {
-
 
1039
		ret = ci_send_msg_to_smc_with_parameter(rdev,
-
 
1040
							PPSMC_StartFanControl,
-
 
1041
							FAN_CONTROL_TABLE);
-
 
1042
		if (ret != PPSMC_Result_OK)
-
 
1043
			return -EINVAL;
-
 
1044
	}
-
 
1045
 
-
 
1046
	return 0;
-
 
1047
}
-
 
1048
 
-
 
1049
#if 0
-
 
1050
static int ci_fan_ctrl_stop_smc_fan_control(struct radeon_device *rdev)
-
 
1051
{
-
 
1052
	PPSMC_Result ret;
-
 
1053
 
-
 
1054
	ret = ci_send_msg_to_smc(rdev, PPSMC_StopFanControl);
-
 
1055
	if (ret == PPSMC_Result_OK)
-
 
1056
		return 0;
-
 
1057
	else
-
 
1058
		return -EINVAL;
-
 
1059
}
-
 
1060
 
-
 
1061
static int ci_fan_ctrl_get_fan_speed_percent(struct radeon_device *rdev,
-
 
1062
					     u32 *speed)
-
 
1063
{
-
 
1064
	u32 duty, duty100;
-
 
1065
	u64 tmp64;
-
 
1066
 
-
 
1067
	if (rdev->pm.no_fan)
-
 
1068
		return -ENOENT;
-
 
1069
 
-
 
1070
	duty100 = (RREG32_SMC(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
-
 
1071
	duty = (RREG32_SMC(CG_THERMAL_STATUS) & FDO_PWM_DUTY_MASK) >> FDO_PWM_DUTY_SHIFT;
-
 
1072
 
-
 
1073
	if (duty100 == 0)
-
 
1074
		return -EINVAL;
-
 
1075
 
-
 
1076
	tmp64 = (u64)duty * 100;
-
 
1077
	do_div(tmp64, duty100);
-
 
1078
	*speed = (u32)tmp64;
-
 
1079
 
-
 
1080
	if (*speed > 100)
-
 
1081
		*speed = 100;
-
 
1082
 
-
 
1083
	return 0;
-
 
1084
}
-
 
1085
 
-
 
1086
static int ci_fan_ctrl_set_fan_speed_percent(struct radeon_device *rdev,
-
 
1087
					     u32 speed)
-
 
1088
{
-
 
1089
	u32 tmp;
-
 
1090
	u32 duty, duty100;
-
 
1091
	u64 tmp64;
-
 
1092
 
-
 
1093
	if (rdev->pm.no_fan)
-
 
1094
		return -ENOENT;
-
 
1095
 
-
 
1096
	if (speed > 100)
-
 
1097
		return -EINVAL;
-
 
1098
 
-
 
1099
	if (rdev->pm.dpm.fan.ucode_fan_control)
-
 
1100
		ci_fan_ctrl_stop_smc_fan_control(rdev);
-
 
1101
 
-
 
1102
	duty100 = (RREG32_SMC(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
-
 
1103
 
-
 
1104
	if (duty100 == 0)
-
 
1105
		return -EINVAL;
-
 
1106
 
-
 
1107
	tmp64 = (u64)speed * duty100;
-
 
1108
	do_div(tmp64, 100);
-
 
1109
	duty = (u32)tmp64;
-
 
1110
 
-
 
1111
	tmp = RREG32_SMC(CG_FDO_CTRL0) & ~FDO_STATIC_DUTY_MASK;
-
 
1112
	tmp |= FDO_STATIC_DUTY(duty);
-
 
1113
	WREG32_SMC(CG_FDO_CTRL0, tmp);
-
 
1114
 
-
 
1115
	ci_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC);
-
 
1116
 
-
 
1117
	return 0;
-
 
1118
}
-
 
1119
 
-
 
1120
static int ci_fan_ctrl_get_fan_speed_rpm(struct radeon_device *rdev,
-
 
1121
					 u32 *speed)
-
 
1122
{
-
 
1123
	u32 tach_period;
-
 
1124
	u32 xclk = radeon_get_xclk(rdev);
-
 
1125
 
-
 
1126
	if (rdev->pm.no_fan)
-
 
1127
		return -ENOENT;
-
 
1128
 
-
 
1129
	if (rdev->pm.fan_pulses_per_revolution == 0)
-
 
1130
		return -ENOENT;
-
 
1131
 
-
 
1132
	tach_period = (RREG32_SMC(CG_TACH_STATUS) & TACH_PERIOD_MASK) >> TACH_PERIOD_SHIFT;
-
 
1133
	if (tach_period == 0)
-
 
1134
		return -ENOENT;
-
 
1135
 
-
 
1136
	*speed = 60 * xclk * 10000 / tach_period;
-
 
1137
 
-
 
1138
	return 0;
-
 
1139
}
-
 
1140
 
-
 
1141
static int ci_fan_ctrl_set_fan_speed_rpm(struct radeon_device *rdev,
-
 
1142
					 u32 speed)
-
 
1143
{
-
 
1144
	u32 tach_period, tmp;
-
 
1145
	u32 xclk = radeon_get_xclk(rdev);
-
 
1146
 
-
 
1147
	if (rdev->pm.no_fan)
-
 
1148
		return -ENOENT;
-
 
1149
 
-
 
1150
	if (rdev->pm.fan_pulses_per_revolution == 0)
-
 
1151
		return -ENOENT;
-
 
1152
 
-
 
1153
	if ((speed < rdev->pm.fan_min_rpm) ||
-
 
1154
	    (speed > rdev->pm.fan_max_rpm))
-
 
1155
		return -EINVAL;
-
 
1156
 
-
 
1157
	if (rdev->pm.dpm.fan.ucode_fan_control)
-
 
1158
		ci_fan_ctrl_stop_smc_fan_control(rdev);
-
 
1159
 
-
 
1160
	tach_period = 60 * xclk * 10000 / (8 * speed);
-
 
1161
	tmp = RREG32_SMC(CG_TACH_CTRL) & ~TARGET_PERIOD_MASK;
-
 
1162
	tmp |= TARGET_PERIOD(tach_period);
-
 
1163
	WREG32_SMC(CG_TACH_CTRL, tmp);
-
 
1164
 
-
 
1165
	ci_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC_RPM);
-
 
1166
 
-
 
1167
	return 0;
-
 
1168
}
-
 
1169
#endif
-
 
1170
 
-
 
1171
static void ci_fan_ctrl_set_default_mode(struct radeon_device *rdev)
-
 
1172
{
-
 
1173
	struct ci_power_info *pi = ci_get_pi(rdev);
-
 
1174
	u32 tmp;
-
 
1175
 
-
 
1176
	if (!pi->fan_ctrl_is_in_default_mode) {
-
 
1177
		tmp = RREG32_SMC(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
-
 
1178
		tmp |= FDO_PWM_MODE(pi->fan_ctrl_default_mode);
-
 
1179
		WREG32_SMC(CG_FDO_CTRL2, tmp);
-
 
1180
 
-
 
1181
		tmp = RREG32_SMC(CG_FDO_CTRL2) & ~TMIN_MASK;
-
 
1182
		tmp |= TMIN(pi->t_min);
-
 
1183
		WREG32_SMC(CG_FDO_CTRL2, tmp);
-
 
1184
		pi->fan_ctrl_is_in_default_mode = true;
-
 
1185
	}
-
 
1186
}
-
 
1187
 
-
 
1188
static void ci_thermal_start_smc_fan_control(struct radeon_device *rdev)
-
 
1189
{
-
 
1190
	if (rdev->pm.dpm.fan.ucode_fan_control) {
-
 
1191
		ci_fan_ctrl_start_smc_fan_control(rdev);
-
 
1192
		ci_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC);
-
 
1193
	}
-
 
1194
}
-
 
1195
 
-
 
1196
static void ci_thermal_initialize(struct radeon_device *rdev)
-
 
1197
{
-
 
1198
	u32 tmp;
-
 
1199
 
-
 
1200
	if (rdev->pm.fan_pulses_per_revolution) {
-
 
1201
		tmp = RREG32_SMC(CG_TACH_CTRL) & ~EDGE_PER_REV_MASK;
-
 
1202
		tmp |= EDGE_PER_REV(rdev->pm.fan_pulses_per_revolution -1);
-
 
1203
		WREG32_SMC(CG_TACH_CTRL, tmp);
-
 
1204
	}
-
 
1205
 
-
 
1206
	tmp = RREG32_SMC(CG_FDO_CTRL2) & ~TACH_PWM_RESP_RATE_MASK;
-
 
1207
	tmp |= TACH_PWM_RESP_RATE(0x28);
-
 
1208
	WREG32_SMC(CG_FDO_CTRL2, tmp);
-
 
1209
}
-
 
1210
 
-
 
1211
static int ci_thermal_start_thermal_controller(struct radeon_device *rdev)
-
 
1212
{
-
 
1213
	int ret;
-
 
1214
 
-
 
1215
	ci_thermal_initialize(rdev);
-
 
1216
	ret = ci_thermal_set_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
-
 
1217
	if (ret)
-
 
1218
		return ret;
-
 
1219
	ret = ci_thermal_enable_alert(rdev, true);
-
 
1220
	if (ret)
-
 
1221
		return ret;
-
 
1222
	if (rdev->pm.dpm.fan.ucode_fan_control) {
-
 
1223
		ret = ci_thermal_setup_fan_table(rdev);
-
 
1224
		if (ret)
-
 
1225
			return ret;
-
 
1226
		ci_thermal_start_smc_fan_control(rdev);
-
 
1227
	}
-
 
1228
 
-
 
1229
	return 0;
-
 
1230
}
-
 
1231
 
-
 
1232
static void ci_thermal_stop_thermal_controller(struct radeon_device *rdev)
-
 
1233
{
-
 
1234
	if (!rdev->pm.no_fan)
-
 
1235
		ci_fan_ctrl_set_default_mode(rdev);
876
}
1236
}
877
 
1237
 
878
#if 0
1238
#if 0
879
static int ci_read_smc_soft_register(struct radeon_device *rdev,
1239
static int ci_read_smc_soft_register(struct radeon_device *rdev,
880
				     u16 reg_offset, u32 *value)
1240
				     u16 reg_offset, u32 *value)
Line 1276... Line 1636...
 {
 	struct ci_power_info *pi = ci_get_pi(rdev);
 
 	if (!pi->sclk_dpm_key_disabled) {
 		PPSMC_Result smc_result =
-			ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, n);
+			ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SCLKDPM_SetEnabledMask, 1 << n);
 		if (smc_result != PPSMC_Result_OK)
 			return -EINVAL;
 	}
Line 1290... Line 1650...
 {
 	struct ci_power_info *pi = ci_get_pi(rdev);
 
 	if (!pi->mclk_dpm_key_disabled) {
 		PPSMC_Result smc_result =
-			ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_MCLKDPM_ForceState, n);
+			ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_MCLKDPM_SetEnabledMask, 1 << n);
 		if (smc_result != PPSMC_Result_OK)
 			return -EINVAL;
 	}
Line 2065... Line 2425...
 		return 0;
 
 	return ni_copy_and_switch_arb_sets(rdev, tmp, MC_CG_ARB_FREQ_F0);
 }
+
+static void ci_register_patching_mc_arb(struct radeon_device *rdev,
+					const u32 engine_clock,
+					const u32 memory_clock,
+					u32 *dram_timimg2)
+{
+	bool patch;
+	u32 tmp, tmp2;
+
+	tmp = RREG32(MC_SEQ_MISC0);
+	patch = ((tmp & 0x0000f00) == 0x300) ? true : false;
+
+	if (patch &&
+	    ((rdev->pdev->device == 0x67B0) ||
+	     (rdev->pdev->device == 0x67B1))) {
+		if ((memory_clock > 100000) && (memory_clock <= 125000)) {
+			tmp2 = (((0x31 * engine_clock) / 125000) - 1) & 0xff;
+			*dram_timimg2 &= ~0x00ff0000;
+			*dram_timimg2 |= tmp2 << 16;
+		} else if ((memory_clock > 125000) && (memory_clock <= 137500)) {
+			tmp2 = (((0x36 * engine_clock) / 137500) - 1) & 0xff;
+			*dram_timimg2 &= ~0x00ff0000;
+			*dram_timimg2 |= tmp2 << 16;
+		}
+	}
+}
 
 static int ci_populate_memory_timing_parameters(struct radeon_device *rdev,
 						u32 sclk,
 						u32 mclk,
Line 2080... Line 2467...
 
 	dram_timing  = RREG32(MC_ARB_DRAM_TIMING);
 	dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
 	burst_time = RREG32(MC_ARB_BURST_TIME) & STATE0_MASK;
 
+	ci_register_patching_mc_arb(rdev, sclk, mclk, &dram_timing2);
+
 	arb_regs->McArbDramTiming  = cpu_to_be32(dram_timing);
 	arb_regs->McArbDramTiming2 = cpu_to_be32(dram_timing2);
Line 2374... Line 2763...
 		struct radeon_atom_ss ss;
 		u32 freq_nom;
 		u32 tmp;
 		u32 reference_clock = rdev->clock.mpll.reference_freq;
 
-		if (pi->mem_gddr5)
-			freq_nom = memory_clock * 4;
+		if (mpll_param.qdr == 1)
+			freq_nom = memory_clock * 4 * (1 << mpll_param.post_div);
 		else
-			freq_nom = memory_clock * 2;
+			freq_nom = memory_clock * 2 * (1 << mpll_param.post_div);
 
 		tmp = (freq_nom / reference_clock);
 		tmp = tmp * tmp;
Line 2457... Line 2846...
 						      &rdev->pm.dpm.dyn_state.phase_shedding_limits_table,
 						      memory_clock,
 						      &memory_level->MinVddcPhases);
 
 	memory_level->EnabledForThrottle = 1;
-	memory_level->EnabledForActivity = 1;
 	memory_level->UpH = 0;
 	memory_level->DownH = 100;
 	memory_level->VoltageDownH = 0;
Line 2790... Line 3178...
 
 	graphic_level->ActivityLevel = sclk_activity_level_t;
 
 	graphic_level->CcPwrDynRm = 0;
 	graphic_level->CcPwrDynRm1 = 0;
-	graphic_level->EnabledForActivity = 1;
 	graphic_level->EnabledForThrottle = 1;
 	graphic_level->UpH = 0;
 	graphic_level->DownH = 0;
Line 2839... Line 3226...
 						       dpm_table->sclk_table.dpm_levels[i].value,
 						       (u16)pi->activity_target[i],
 						       &pi->smc_state_table.GraphicsLevel[i]);
 		if (ret)
 			return ret;
+		if (i > 1)
+			pi->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
 		if (i == (dpm_table->sclk_table.count - 1))
 			pi->smc_state_table.GraphicsLevel[i].DisplayWatermark =
 				PPSMC_DISPLAY_WATERMARK_HIGH;
 	}
+	pi->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
Line 2848... Line 3238...
 
 	pi->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
 	pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
Line 2886... Line 3276...
 						      &pi->smc_state_table.MemoryLevel[i]);
 		if (ret)
 			return ret;
 	}
+
+	pi->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
+
+	if ((dpm_table->mclk_table.count >= 2) &&
+	    ((rdev->pdev->device == 0x67B0) || (rdev->pdev->device == 0x67B1))) {
+		pi->smc_state_table.MemoryLevel[1].MinVddc =
+			pi->smc_state_table.MemoryLevel[0].MinVddc;
+		pi->smc_state_table.MemoryLevel[1].MinVddcPhases =
+			pi->smc_state_table.MemoryLevel[0].MinVddcPhases;
+	}
 
 	pi->smc_state_table.MemoryLevel[0].ActivityLevel = cpu_to_be16(0x1F);
 
 	pi->smc_state_table.MemoryDpmLevelCount = (u8)dpm_table->mclk_table.count;
Line 2942... Line 3342...
 
 	ci_reset_single_dpm_table(rdev,
 				  &pi->dpm_table.pcie_speed_table,
 				  SMU7_MAX_LEVELS_LINK);
 
+	if (rdev->family == CHIP_BONAIRE)
+		ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
+					  pi->pcie_gen_powersaving.min,
+					  pi->pcie_lane_powersaving.max);
+	else
 	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
 				  pi->pcie_gen_powersaving.min,
 				  pi->pcie_lane_powersaving.min);
Line 3011... Line 3416...
 		if ((i == 0) ||
 		    (pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count-1].value !=
 		     allowed_sclk_vddc_table->entries[i].clk)) {
 			pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].value =
 				allowed_sclk_vddc_table->entries[i].clk;
-			pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled = true;
+			pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled =
+				(i == 0) ? true : false;
 			pi->dpm_table.sclk_table.count++;
 		}
 	}
 
Line 3023... Line 3429...
-		if ((i==0) ||
+		if ((i == 0) ||
 		    (pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count-1].value !=
 		     allowed_mclk_table->entries[i].clk)) {
 			pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].value =
 				allowed_mclk_table->entries[i].clk;
-			pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled = true;
+			pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled =
+				(i == 0) ? true : false;
 			pi->dpm_table.mclk_table.count++;
 		}
 	}
 
Line 3189... Line 3596...
 	table->MemoryInterval = 1;
 	table->VoltageResponseTime = 0;
 	table->VddcVddciDelta = 4000;
 	table->PhaseResponseTime = 0;
 	table->MemoryThermThrottleEnable = 1;
-	table->PCIeBootLinkLevel = 0;
+	table->PCIeBootLinkLevel = pi->dpm_table.pcie_speed_table.count - 1;
 	table->PCIeGenInterval = 1;
 	if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2)
 		table->SVI2Enable  = 1;
 	else
 		table->SVI2Enable  = 0;
Line 3343... Line 3750...
 static int ci_upload_dpm_level_enable_mask(struct radeon_device *rdev)
 {
 	struct ci_power_info *pi = ci_get_pi(rdev);
 	PPSMC_Result result;
+
+	ci_apply_disp_minimum_voltage_request(rdev);
 
 	if (!pi->sclk_dpm_key_disabled) {
 		if (pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
 			result = ci_send_msg_to_smc_with_parameter(rdev,
 								   PPSMC_MSG_SCLKDPM_SetEnabledMask,
Line 3362... Line 3771...
 								   pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
 			if (result != PPSMC_Result_OK)
 				return -EINVAL;
 		}
 	}
-
+#if 0
 	if (!pi->pcie_dpm_key_disabled) {
 		if (pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
 			result = ci_send_msg_to_smc_with_parameter(rdev,
 								   PPSMC_MSG_PCIeDPM_SetEnabledMask,
 								   pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
 			if (result != PPSMC_Result_OK)
 				return -EINVAL;
 		}
 	}
-
-	ci_apply_disp_minimum_voltage_request(rdev);
-
+#endif
 	return 0;
 }
Line 3382... Line 3789...
 
 static void ci_find_dpm_states_clocks_in_dpm_table(struct radeon_device *rdev,
Line 3400... Line 3807...
 
 	if (i >= sclk_table->count) {
 		pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
 	} else {
 		/* XXX check display min clock requirements */
-		if (0 != CISLAND_MINIMUM_ENGINE_CLOCK)
+		if (CISLAND_MINIMUM_ENGINE_CLOCK != CISLAND_MINIMUM_ENGINE_CLOCK)
 			pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
 	}
 
Line 3730... Line 4137...
3730
 
4137
 
3731
int ci_dpm_force_performance_level(struct radeon_device *rdev,
4138
int ci_dpm_force_performance_level(struct radeon_device *rdev,
3732
				   enum radeon_dpm_forced_level level)
4139
				   enum radeon_dpm_forced_level level)
3733
{
4140
{
3734
	struct ci_power_info *pi = ci_get_pi(rdev);
-
 
3735
	PPSMC_Result smc_result;
4141
	struct ci_power_info *pi = ci_get_pi(rdev);
3736
	u32 tmp, levels, i;
4142
	u32 tmp, levels, i;
Line 3737... Line 4143...
3737
	int ret;
4143
	int ret;
3738
 
4144
 
3739
	if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
4145
	if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
3740
		if ((!pi->sclk_dpm_key_disabled) &&
4146
		if ((!pi->pcie_dpm_key_disabled) &&
3741
		    pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
4147
		    pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3742
			levels = 0;
4148
			levels = 0;
3743
			tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask;
4149
			tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
3744
			while (tmp >>= 1)
4150
			while (tmp >>= 1)
3745
				levels++;
4151
				levels++;
3746
			if (levels) {
4152
			if (levels) {
3747
				ret = ci_dpm_force_state_sclk(rdev, levels);
4153
				ret = ci_dpm_force_state_pcie(rdev, level);
3748
				if (ret)
4154
				if (ret)
3749
					return ret;
4155
					return ret;
3750
				for (i = 0; i < rdev->usec_timeout; i++) {
4156
				for (i = 0; i < rdev->usec_timeout; i++) {
3751
					tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
4157
					tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) &
3752
					       CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT;
4158
					       CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT;
3753
					if (tmp == levels)
4159
					if (tmp == levels)
3754
						break;
4160
						break;
3755
					udelay(1);
4161
					udelay(1);
3756
				}
4162
				}
3757
			}
4163
			}
3758
		}
4164
		}
3759
		if ((!pi->mclk_dpm_key_disabled) &&
4165
		if ((!pi->sclk_dpm_key_disabled) &&
3760
		    pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
4166
		    pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3761
			levels = 0;
4167
			levels = 0;
3762
			tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
4168
			tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask;
3763
			while (tmp >>= 1)
4169
			while (tmp >>= 1)
3764
				levels++;
4170
				levels++;
3765
			if (levels) {
4171
			if (levels) {
3766
				ret = ci_dpm_force_state_mclk(rdev, levels);
4172
				ret = ci_dpm_force_state_sclk(rdev, levels);
3767
				if (ret)
4173
				if (ret)
3768
					return ret;
4174
					return ret;
3769
				for (i = 0; i < rdev->usec_timeout; i++) {
4175
				for (i = 0; i < rdev->usec_timeout; i++) {
3770
					tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
4176
					tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
3771
					       CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT;
4177
					       CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT;
3772
					if (tmp == levels)
4178
					if (tmp == levels)
3773
						break;
4179
						break;
3774
					udelay(1);
4180
					udelay(1);
3775
				}
4181
				}
3776
			}
4182
			}
3777
		}
4183
		}
3778
		if ((!pi->pcie_dpm_key_disabled) &&
4184
		if ((!pi->mclk_dpm_key_disabled) &&
3779
		    pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
4185
		    pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3780
			levels = 0;
4186
			levels = 0;
3781
			tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
4187
			tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
3782
			while (tmp >>= 1)
4188
			while (tmp >>= 1)
3783
				levels++;
4189
				levels++;
3784
			if (levels) {
4190
			if (levels) {
3785
				ret = ci_dpm_force_state_pcie(rdev, level);
4191
				ret = ci_dpm_force_state_mclk(rdev, levels);
3786
				if (ret)
4192
				if (ret)
3787
					return ret;
4193
					return ret;
3788
				for (i = 0; i < rdev->usec_timeout; i++) {
4194
				for (i = 0; i < rdev->usec_timeout; i++) {
3789
					tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) &
4195
					tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
3790
					       CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT;
4196
					       CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT;
3791
					if (tmp == levels)
4197
					if (tmp == levels)
3792
						break;
4198
						break;
3793
					udelay(1);
4199
					udelay(1);
Line 3839... Line 4245...
3839
					break;
4245
					break;
3840
				udelay(1);
4246
				udelay(1);
3841
			}
4247
			}
3842
		}
4248
		}
3843
	} else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) {
4249
	} else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) {
3844
		if (!pi->sclk_dpm_key_disabled) {
-
 
3845
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_NoForcedLevel);
-
 
3846
			if (smc_result != PPSMC_Result_OK)
-
 
3847
				return -EINVAL;
-
 
3848
		}
-
 
3849
		if (!pi->mclk_dpm_key_disabled) {
-
 
3850
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_NoForcedLevel);
-
 
3851
			if (smc_result != PPSMC_Result_OK)
-
 
3852
				return -EINVAL;
-
 
3853
		}
-
 
3854
		if (!pi->pcie_dpm_key_disabled) {
4250
		if (!pi->pcie_dpm_key_disabled) {
-
 
4251
			PPSMC_Result smc_result;
-
 
4252
 
3855
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_UnForceLevel);
4253
			smc_result = ci_send_msg_to_smc(rdev,
-
 
4254
							PPSMC_MSG_PCIeDPM_UnForceLevel);
3856
			if (smc_result != PPSMC_Result_OK)
4255
			if (smc_result != PPSMC_Result_OK)
3857
				return -EINVAL;
4256
				return -EINVAL;
3858
		}
4257
		}
-
 
4258
		ret = ci_upload_dpm_level_enable_mask(rdev);
-
 
4259
		if (ret)
-
 
4260
			return ret;
3859
	}
4261
	}
Line 3860... Line 4262...
3860
 
4262
 
Line 3861... Line 4263...
3861
	rdev->pm.dpm.forced_level = level;
4263
	rdev->pm.dpm.forced_level = level;
Line 4059... Line 4461...
4059
	ci_table->num_entries = table->num_entries;
4461
	ci_table->num_entries = table->num_entries;
Line 4060... Line 4462...
4060
 
4462
 
4061
	return 0;
4463
	return 0;
Line -... Line 4464...
-
 
4464
}
-
 
4465
 
-
 
4466
static int ci_register_patching_mc_seq(struct radeon_device *rdev,
-
 
4467
				       struct ci_mc_reg_table *table)
-
 
4468
{
-
 
4469
	u8 i, k;
-
 
4470
	u32 tmp;
-
 
4471
	bool patch;
-
 
4472
 
-
 
4473
	tmp = RREG32(MC_SEQ_MISC0);
-
 
4474
	patch = ((tmp & 0x0000f00) == 0x300) ? true : false;
-
 
4475
 
-
 
4476
	if (patch &&
-
 
4477
	    ((rdev->pdev->device == 0x67B0) ||
-
 
4478
	     (rdev->pdev->device == 0x67B1))) {
-
 
4479
		for (i = 0; i < table->last; i++) {
-
 
4480
			if (table->last >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
-
 
4481
				return -EINVAL;
-
 
4482
			switch(table->mc_reg_address[i].s1 >> 2) {
-
 
4483
			case MC_SEQ_MISC1:
-
 
4484
				for (k = 0; k < table->num_entries; k++) {
-
 
4485
					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
-
 
4486
					    (table->mc_reg_table_entry[k].mclk_max == 137500))
-
 
4487
						table->mc_reg_table_entry[k].mc_data[i] =
-
 
4488
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFF8) |
-
 
4489
							0x00000007;
-
 
4490
				}
-
 
4491
				break;
-
 
4492
			case MC_SEQ_WR_CTL_D0:
-
 
4493
				for (k = 0; k < table->num_entries; k++) {
-
 
4494
					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
-
 
4495
					    (table->mc_reg_table_entry[k].mclk_max == 137500))
-
 
4496
						table->mc_reg_table_entry[k].mc_data[i] =
-
 
4497
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
-
 
4498
							0x0000D0DD;
-
 
4499
				}
-
 
4500
				break;
-
 
4501
			case MC_SEQ_WR_CTL_D1:
-
 
4502
				for (k = 0; k < table->num_entries; k++) {
-
 
4503
					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
-
 
4504
					    (table->mc_reg_table_entry[k].mclk_max == 137500))
-
 
4505
						table->mc_reg_table_entry[k].mc_data[i] =
-
 
4506
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
-
 
4507
							0x0000D0DD;
-
 
4508
				}
-
 
4509
				break;
-
 
4510
			case MC_SEQ_WR_CTL_2:
-
 
4511
				for (k = 0; k < table->num_entries; k++) {
-
 
4512
					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
-
 
4513
					    (table->mc_reg_table_entry[k].mclk_max == 137500))
-
 
4514
						table->mc_reg_table_entry[k].mc_data[i] = 0;
-
 
4515
				}
-
 
4516
				break;
-
 
4517
			case MC_SEQ_CAS_TIMING:
-
 
4518
				for (k = 0; k < table->num_entries; k++) {
-
 
4519
					if (table->mc_reg_table_entry[k].mclk_max == 125000)
-
 
4520
						table->mc_reg_table_entry[k].mc_data[i] =
-
 
4521
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
-
 
4522
							0x000C0140;
-
 
4523
					else if (table->mc_reg_table_entry[k].mclk_max == 137500)
-
 
4524
						table->mc_reg_table_entry[k].mc_data[i] =
-
 
4525
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
-
 
4526
							0x000C0150;
-
 
4527
				}
-
 
4528
				break;
-
 
4529
			case MC_SEQ_MISC_TIMING:
-
 
4530
				for (k = 0; k < table->num_entries; k++) {
-
 
4531
					if (table->mc_reg_table_entry[k].mclk_max == 125000)
-
 
4532
						table->mc_reg_table_entry[k].mc_data[i] =
-
 
4533
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
-
 
4534
							0x00000030;
-
 
4535
					else if (table->mc_reg_table_entry[k].mclk_max == 137500)
-
 
4536
						table->mc_reg_table_entry[k].mc_data[i] =
-
 
4537
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
-
 
4538
							0x00000035;
-
 
4539
				}
-
 
4540
				break;
-
 
4541
			default:
-
 
4542
				break;
-
 
4543
			}
-
 
4544
		}
-
 
4545
 
-
 
4546
		WREG32(MC_SEQ_IO_DEBUG_INDEX, 3);
-
 
4547
		tmp = RREG32(MC_SEQ_IO_DEBUG_DATA);
-
 
4548
		tmp = (tmp & 0xFFF8FFFF) | (1 << 16);
-
 
4549
		WREG32(MC_SEQ_IO_DEBUG_INDEX, 3);
-
 
4550
		WREG32(MC_SEQ_IO_DEBUG_DATA, tmp);
-
 
4551
	}
-
 
4552
 
-
 
4553
	return 0;
4062
}
4554
}
4063
 
4555
 
4064
static int ci_initialize_mc_reg_table(struct radeon_device *rdev)
4556
static int ci_initialize_mc_reg_table(struct radeon_device *rdev)
4065
{
4557
{
4066
	struct ci_power_info *pi = ci_get_pi(rdev);
4558
	struct ci_power_info *pi = ci_get_pi(rdev);
Line 4102... Line 4594...
4102
	if (ret)
4594
	if (ret)
4103
		goto init_mc_done;
4595
		goto init_mc_done;
Line 4104... Line 4596...
4104
 
4596
 
Line -... Line 4597...
-
 
4597
	ci_set_s0_mc_reg_index(ci_table);
-
 
4598
 
-
 
4599
	ret = ci_register_patching_mc_seq(rdev, ci_table);
-
 
4600
	if (ret)
4105
	ci_set_s0_mc_reg_index(ci_table);
4601
		goto init_mc_done;
4106
 
4602
 
4107
	ret = ci_set_mc_special_registers(rdev, ci_table);
4603
	ret = ci_set_mc_special_registers(rdev, ci_table);
Line 4108... Line 4604...
4108
	if (ret)
4604
	if (ret)
Line 4698... Line 5194...
4698
	if (ret) {
5194
	if (ret) {
4699
		DRM_ERROR("ci_enable_power_containment failed\n");
5195
		DRM_ERROR("ci_enable_power_containment failed\n");
4700
		return ret;
5196
		return ret;
4701
	}
5197
	}
Line -... Line 5198...
-
 
5198
 
-
 
5199
	ret = ci_power_control_set_level(rdev);
-
 
5200
	if (ret) {
-
 
5201
		DRM_ERROR("ci_power_control_set_level failed\n");
-
 
5202
		return ret;
-
 
5203
	}
4702
 
5204
 
Line -... Line 5205...
-
 
5205
	ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
-
 
5206
 
-
 
5207
	ret = ci_enable_thermal_based_sclk_dpm(rdev, true);
-
 
5208
	if (ret) {
-
 
5209
		DRM_ERROR("ci_enable_thermal_based_sclk_dpm failed\n");
-
 
5210
		return ret;
-
 
5211
	}
-
 
5212
 
4703
	ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
5213
	ci_thermal_start_thermal_controller(rdev);
Line 4704... Line 5214...
4704
 
5214
 
4705
	ci_update_current_ps(rdev, boot_ps);
5215
	ci_update_current_ps(rdev, boot_ps);
Line 4706... Line 5216...
4706
 
5216
 
4707
	return 0;
5217
	return 0;
4708
}
5218
}
Line 4709... Line -...
4709
 
-
 
4710
int ci_dpm_late_enable(struct radeon_device *rdev)
5219
 
4711
{
5220
static int ci_set_temperature_range(struct radeon_device *rdev)
4712
	int ret;
5221
{
4713
 
-
 
4714
	if (rdev->irq.installed &&
5222
	int ret;
4715
	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
5223
 
-
 
5224
	ret = ci_thermal_enable_alert(rdev, false);
4716
#if 0
5225
	if (ret)
-
 
5226
		return ret;
4717
		PPSMC_Result result;
5227
	ret = ci_thermal_set_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
4718
#endif
-
 
4719
		ret = ci_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
-
 
4720
		if (ret) {
-
 
4721
			DRM_ERROR("ci_set_thermal_temperature_range failed\n");
-
 
4722
			return ret;
-
 
Line 4723... Line -...
4723
		}
-
 
4724
		rdev->irq.dpm_thermal = true;
-
 
4725
		radeon_irq_set(rdev);
5228
	if (ret)
4726
#if 0
5229
		return ret;
Line -... Line 5230...
-
 
5230
	ret = ci_thermal_enable_alert(rdev, true);
-
 
5231
	if (ret)
-
 
5232
		return ret;
-
 
5233
 
-
 
5234
	return ret;
-
 
5235
}
-
 
5236
 
-
 
5237
int ci_dpm_late_enable(struct radeon_device *rdev)
4727
		result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt);
5238
{
Line 4728... Line 5239...
4728
 
5239
	int ret;
4729
		if (result != PPSMC_Result_OK)
5240
 
Line 4744... Line 5255...
4744
	ci_dpm_powergate_uvd(rdev, false);
5255
	ci_dpm_powergate_uvd(rdev, false);
Line 4745... Line 5256...
4745
 
5256
 
4746
	if (!ci_is_smc_running(rdev))
5257
	if (!ci_is_smc_running(rdev))
Line -... Line 5258...
-
 
5258
		return;
-
 
5259
 
4747
		return;
5260
	ci_thermal_stop_thermal_controller(rdev);
4748
 
5261
 
4749
	if (pi->thermal_protection)
5262
	if (pi->thermal_protection)
4750
		ci_enable_thermal_protection(rdev, false);
5263
		ci_enable_thermal_protection(rdev, false);
4751
	ci_enable_power_containment(rdev, false);
5264
	ci_enable_power_containment(rdev, false);
4752
	ci_enable_smc_cac(rdev, false);
5265
	ci_enable_smc_cac(rdev, false);
4753
	ci_enable_didt(rdev, false);
5266
	ci_enable_didt(rdev, false);
4754
	ci_enable_spread_spectrum(rdev, false);
5267
	ci_enable_spread_spectrum(rdev, false);
4755
	ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
5268
	ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
4756
	ci_stop_dpm(rdev);
5269
	ci_stop_dpm(rdev);
4757
	ci_enable_ds_master_switch(rdev, true);
5270
	ci_enable_ds_master_switch(rdev, false);
4758
	ci_enable_ulv(rdev, false);
5271
	ci_enable_ulv(rdev, false);
4759
	ci_clear_vc(rdev);
5272
	ci_clear_vc(rdev);
4760
	ci_reset_to_default(rdev);
5273
	ci_reset_to_default(rdev);
-
 
5274
	ci_dpm_stop_smc(rdev);
Line 4761... Line 5275...
4761
	ci_dpm_stop_smc(rdev);
5275
	ci_force_switch_to_arb_f0(rdev);
4762
	ci_force_switch_to_arb_f0(rdev);
5276
	ci_enable_thermal_based_sclk_dpm(rdev, false);
Line 4763... Line 5277...
4763
 
5277
 
Line 4827... Line 5341...
4827
		ci_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps);
5341
		ci_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps);
Line 4828... Line 5342...
4828
 
5342
 
4829
	return 0;
5343
	return 0;
Line 4830... Line -...
4830
}
-
 
4831
 
-
 
4832
int ci_dpm_power_control_set_level(struct radeon_device *rdev)
-
 
4833
{
-
 
4834
	return ci_power_control_set_level(rdev);
-
 
4835
}
5344
}
4836
 
5345
 
4837
void ci_dpm_reset_asic(struct radeon_device *rdev)
5346
void ci_dpm_reset_asic(struct radeon_device *rdev)
4838
{
5347
{
Line 5091... Line 5600...
5091
}
5600
}
Line 5092... Line 5601...
5092
 
5601
 
5093
int ci_dpm_init(struct radeon_device *rdev)
5602
int ci_dpm_init(struct radeon_device *rdev)
5094
{
5603
{
-
 
5604
	int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
-
 
5605
	SMU7_Discrete_DpmTable  *dpm_table;
5095
	int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
5606
	struct radeon_gpio_rec gpio;
5096
	u16 data_offset, size;
5607
	u16 data_offset, size;
5097
	u8 frev, crev;
5608
	u8 frev, crev;
5098
	struct ci_power_info *pi;
5609
	struct ci_power_info *pi;
5099
	int ret;
5610
	int ret;
Line 5160... Line 5671...
5160
	pi->mclk_activity_target = CISLAND_MCLK_TARGETACTIVITY_DFLT;
5671
	pi->mclk_activity_target = CISLAND_MCLK_TARGETACTIVITY_DFLT;
Line 5161... Line 5672...
5161
 
5672
 
5162
	pi->sclk_dpm_key_disabled = 0;
5673
	pi->sclk_dpm_key_disabled = 0;
5163
	pi->mclk_dpm_key_disabled = 0;
5674
	pi->mclk_dpm_key_disabled = 0;
-
 
5675
	pi->pcie_dpm_key_disabled = 0;
Line 5164... Line 5676...
5164
	pi->pcie_dpm_key_disabled = 0;
5676
	pi->thermal_sclk_dpm_enabled = 0;
5165
 
5677
 
5166
	/* mclk dpm is unstable on some R7 260X cards with the old mc ucode */
5678
	/* mclk dpm is unstable on some R7 260X cards with the old mc ucode */
5167
	if ((rdev->pdev->device == 0x6658) &&
5679
	if ((rdev->pdev->device == 0x6658) &&
Line 5224... Line 5736...
5224
		pi->thermal_temp_setting.temperature_shutdown = 104000;
5736
		pi->thermal_temp_setting.temperature_shutdown = 104000;
5225
	}
5737
	}
Line 5226... Line 5738...
5226
 
5738
 
Line -... Line 5739...
-
 
5739
	pi->uvd_enabled = false;
-
 
5740
 
-
 
5741
	dpm_table = &pi->smc_state_table;
-
 
5742
 
-
 
5743
	gpio = radeon_atombios_lookup_gpio(rdev, VDDC_VRHOT_GPIO_PINID);
-
 
5744
	if (gpio.valid) {
-
 
5745
		dpm_table->VRHotGpio = gpio.shift;
-
 
5746
		rdev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
-
 
5747
	} else {
-
 
5748
		dpm_table->VRHotGpio = CISLANDS_UNUSED_GPIO_PIN;
-
 
5749
		rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
-
 
5750
	}
-
 
5751
 
-
 
5752
	gpio = radeon_atombios_lookup_gpio(rdev, PP_AC_DC_SWITCH_GPIO_PINID);
-
 
5753
	if (gpio.valid) {
-
 
5754
		dpm_table->AcDcGpio = gpio.shift;
-
 
5755
		rdev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_HARDWAREDC;
-
 
5756
	} else {
-
 
5757
		dpm_table->AcDcGpio = CISLANDS_UNUSED_GPIO_PIN;
-
 
5758
		rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_HARDWAREDC;
-
 
5759
	}
-
 
5760
 
-
 
5761
	gpio = radeon_atombios_lookup_gpio(rdev, VDDC_PCC_GPIO_PINID);
-
 
5762
	if (gpio.valid) {
-
 
5763
		u32 tmp = RREG32_SMC(CNB_PWRMGT_CNTL);
-
 
5764
 
-
 
5765
		switch (gpio.shift) {
-
 
5766
		case 0:
-
 
5767
			tmp &= ~GNB_SLOW_MODE_MASK;
-
 
5768
			tmp |= GNB_SLOW_MODE(1);
-
 
5769
			break;
-
 
5770
		case 1:
-
 
5771
			tmp &= ~GNB_SLOW_MODE_MASK;
-
 
5772
			tmp |= GNB_SLOW_MODE(2);
-
 
5773
			break;
-
 
5774
		case 2:
-
 
5775
			tmp |= GNB_SLOW;
-
 
5776
			break;
-
 
5777
		case 3:
-
 
5778
			tmp |= FORCE_NB_PS1;
-
 
5779
			break;
-
 
5780
		case 4:
-
 
5781
			tmp |= DPM_ENABLED;
-
 
5782
			break;
-
 
5783
		default:
-
 
5784
			DRM_ERROR("Invalid PCC GPIO: %u!\n", gpio.shift);
-
 
5785
			break;
-
 
5786
		}
-
 
5787
		WREG32_SMC(CNB_PWRMGT_CNTL, tmp);
5227
	pi->uvd_enabled = false;
5788
	}
5228
 
5789
 
5229
	pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_NONE;
5790
	pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_NONE;
5230
	pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_NONE;
5791
	pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_NONE;
5231
	pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_NONE;
5792
	pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_NONE;
Line 5285... Line 5846...
5285
	if ((rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
5846
	if ((rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
5286
	    (rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
5847
	    (rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
5287
		rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
5848
		rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
5288
			rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
5849
			rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
Line -... Line 5850...
-
 
5850
 
-
 
5851
	pi->fan_ctrl_is_in_default_mode = true;
5289
 
5852
 
5290
	return 0;
5853
	return 0;
Line 5291... Line 5854...
5291
}
5854
}
5292
 
5855
 
5293
void ci_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
5856
void ci_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
-
 
5857
						    struct seq_file *m)
-
 
5858
{
5294
						    struct seq_file *m)
5859
	struct ci_power_info *pi = ci_get_pi(rdev);
5295
{
5860
	struct radeon_ps *rps = &pi->current_rps;
Line -... Line 5861...
-
 
5861
	u32 sclk = ci_get_average_sclk_freq(rdev);
-
 
5862
	u32 mclk = ci_get_average_mclk_freq(rdev);
5296
	u32 sclk = ci_get_average_sclk_freq(rdev);
5863
 
5297
	u32 mclk = ci_get_average_mclk_freq(rdev);
5864
	seq_printf(m, "uvd    %sabled\n", pi->uvd_enabled ? "en" : "dis");
5298
 
5865
	seq_printf(m, "vce    %sabled\n", rps->vce_active ? "en" : "dis");
Line 5299... Line 5866...
5299
	seq_printf(m, "power level avg    sclk: %u mclk: %u\n",
5866
	seq_printf(m, "power level avg    sclk: %u mclk: %u\n",