Subversion Repositories Kolibri OS

Compare Revisions

Regard whitespace Rev 6103 → Rev 6104

/drivers/video/drm/radeon/si_dpm.c
1740,6 → 1740,7
struct ni_ps *ni_get_ps(struct radeon_ps *rps);
 
extern int si_mc_load_microcode(struct radeon_device *rdev);
extern void vce_v1_0_enable_mgcg(struct radeon_device *rdev, bool enable);
 
static int si_populate_voltage_value(struct radeon_device *rdev,
const struct atom_voltage_table *table,
1756,6 → 1757,9
u32 engine_clock,
SISLANDS_SMC_SCLK_VALUE *sclk);
 
static void si_thermal_start_smc_fan_control(struct radeon_device *rdev);
static void si_fan_ctrl_set_default_mode(struct radeon_device *rdev);
 
static struct si_power_info *si_get_pi(struct radeon_device *rdev)
{
struct si_power_info *pi = rdev->pm.dpm.priv;
2908,6 → 2912,76
return ret;
}
 
/*
 * One board (identified by its full PCI identity: chip and subsystem
 * vendor/device IDs) with known dpm stability problems, plus the clock
 * caps to apply to it.  A cap of 0 means "leave that clock uncapped".
 * NOTE(review): cap values appear to be in 10 kHz units like the rest
 * of the dpm code (120000 == 1.2 GHz) -- confirm against radeon_pm.
 */
struct si_dpm_quirk {
u32 chip_vendor;
u32 chip_device;
u32 subsys_vendor;
u32 subsys_device;
u32 max_sclk;
u32 max_mclk;
};
 
/* cards with dpm stability problems */
/* { chip_vendor, chip_device, subsys_vendor, subsys_device, max_sclk, max_mclk } */
static struct si_dpm_quirk si_dpm_quirk_list[] = {
/* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */
{ PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
{ PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
{ PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 },
{ PCI_VENDOR_ID_ATI, 0x6811, 0x1462, 0x2015, 0, 120000 },
{ PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 },
/* terminator: chip_device == 0 ends the scan (trailing fields zero-filled) */
{ 0, 0, 0, 0 },
};
 
/*
 * Clamp a requested VCE voltage to the highest recorded leakage voltage.
 * Returns the smaller of @vce_voltage and the maximum entry in the
 * leakage-voltage table; returns @vce_voltage unchanged when the table
 * is empty.
 */
static u16 si_get_lower_of_leakage_and_vce_voltage(struct radeon_device *rdev,
						   u16 vce_voltage)
{
	struct si_power_info *si_pi = si_get_pi(rdev);
	u16 max_leakage = 0;
	int idx;

	/* find the largest leakage voltage on record */
	for (idx = 0; idx < si_pi->leakage_voltage.count; idx++) {
		u16 v = si_pi->leakage_voltage.entries[idx].voltage;

		if (v > max_leakage)
			max_leakage = v;
	}

	if (si_pi->leakage_voltage.count && (max_leakage < vce_voltage))
		return max_leakage;

	return vce_voltage;
}
 
/*
 * Look up the voltage required for the requested VCE clocks.
 * Picks the first dependency-table entry whose evclk/ecclk both satisfy
 * the request; falls back to the table's last (highest) entry when none
 * does.  The result is additionally clamped by the leakage voltage.
 * Returns 0 when an entry matched, -EINVAL when the fallback was used.
 */
static int si_get_vce_clock_voltage(struct radeon_device *rdev,
				    u32 evclk, u32 ecclk, u16 *voltage)
{
	struct radeon_vce_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
	bool found = false;
	u32 idx;

	/* no VCE clocks requested, or nothing to look up: no voltage needed */
	if (((evclk == 0) && (ecclk == 0)) ||
	    (table && (table->count == 0))) {
		*voltage = 0;
		return 0;
	}

	for (idx = 0; idx < table->count; idx++) {
		if ((evclk <= table->entries[idx].evclk) &&
		    (ecclk <= table->entries[idx].ecclk)) {
			*voltage = table->entries[idx].v;
			found = true;
			break;
		}
	}

	/* if no match return the highest voltage */
	if (!found)
		*voltage = table->entries[table->count - 1].v;

	*voltage = si_get_lower_of_leakage_and_vce_voltage(rdev, *voltage);

	return found ? 0 : -EINVAL;
}
 
static void si_apply_state_adjust_rules(struct radeon_device *rdev,
struct radeon_ps *rps)
{
2916,10 → 2990,35
bool disable_mclk_switching = false;
bool disable_sclk_switching = false;
u32 mclk, sclk;
u16 vddc, vddci;
u16 vddc, vddci, min_vce_voltage = 0;
u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
u32 max_sclk = 0, max_mclk = 0;
int i;
struct si_dpm_quirk *p = si_dpm_quirk_list;
 
/* Apply dpm quirks */
while (p && p->chip_device != 0) {
if (rdev->pdev->vendor == p->chip_vendor &&
rdev->pdev->device == p->chip_device &&
rdev->pdev->subsystem_vendor == p->subsys_vendor &&
rdev->pdev->subsystem_device == p->subsys_device) {
max_sclk = p->max_sclk;
max_mclk = p->max_mclk;
break;
}
++p;
}
 
if (rps->vce_active) {
rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;
rps->ecclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].ecclk;
si_get_vce_clock_voltage(rdev, rps->evclk, rps->ecclk,
&min_vce_voltage);
} else {
rps->evclk = 0;
rps->ecclk = 0;
}
 
if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
ni_dpm_vblank_too_short(rdev))
disable_mclk_switching = true;
2972,7 → 3071,15
if (ps->performance_levels[i].mclk > max_mclk_vddc)
ps->performance_levels[i].mclk = max_mclk_vddc;
}
if (max_mclk) {
if (ps->performance_levels[i].mclk > max_mclk)
ps->performance_levels[i].mclk = max_mclk;
}
if (max_sclk) {
if (ps->performance_levels[i].sclk > max_sclk)
ps->performance_levels[i].sclk = max_sclk;
}
}
 
/* XXX validate the min clocks required for display */
 
2992,6 → 3099,13
vddc = ps->performance_levels[0].vddc;
}
 
if (rps->vce_active) {
if (sclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk)
sclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk;
if (mclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].mclk)
mclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].mclk;
}
 
/* adjusted low state */
ps->performance_levels[0].sclk = sclk;
ps->performance_levels[0].mclk = mclk;
3041,6 → 3155,8
&ps->performance_levels[i]);
 
for (i = 0; i < ps->performance_level_count; i++) {
if (ps->performance_levels[i].vddc < min_vce_voltage)
ps->performance_levels[i].vddc = min_vce_voltage;
btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
ps->performance_levels[i].sclk,
max_limits->vddc, &ps->performance_levels[i].vddc);
3067,7 → 3183,6
if (ps->performance_levels[i].vddc > rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc)
ps->dc_compatible = false;
}
 
}
 
#if 0
3320,11 → 3435,13
return 0;
}
 
#if 0
/* Ask the SMC to switch back to the boot (initial) power state. */
static int si_set_boot_state(struct radeon_device *rdev)
{
	if (si_send_msg_to_smc(rdev, PPSMC_MSG_SwitchToInitialState) == PPSMC_Result_OK)
		return 0;

	return -EINVAL;
}
#endif
 
static int si_set_sw_state(struct radeon_device *rdev)
{
5814,6 → 5931,21
}
}
 
/*
 * Program the VCE clocks when they changed between the old and new
 * power states.  MGCG is disabled while any VCE clock is running and
 * re-enabled once both are off.
 */
static void si_set_vce_clock(struct radeon_device *rdev,
			     struct radeon_ps *new_rps,
			     struct radeon_ps *old_rps)
{
	/* nothing to do when neither clock changed */
	if ((old_rps->evclk == new_rps->evclk) &&
	    (old_rps->ecclk == new_rps->ecclk))
		return;

	/* turn the clocks on when encoding, off otherwise */
	vce_v1_0_enable_mgcg(rdev, !(new_rps->evclk || new_rps->ecclk));
	radeon_set_vce_clocks(rdev, new_rps->evclk, new_rps->ecclk);
}
 
void si_dpm_setup_asic(struct radeon_device *rdev)
{
int r;
5934,6 → 6066,10
slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
 
fan_table.temp_min = cpu_to_be16((50 + rdev->pm.dpm.fan.t_min) / 100);
fan_table.temp_med = cpu_to_be16((50 + rdev->pm.dpm.fan.t_med) / 100);
fan_table.temp_max = cpu_to_be16((50 + rdev->pm.dpm.fan.t_max) / 100);
 
fan_table.slope1 = cpu_to_be16(slope1);
fan_table.slope2 = cpu_to_be16(slope2);
 
5973,28 → 6109,34
 
static int si_fan_ctrl_start_smc_fan_control(struct radeon_device *rdev)
{
struct si_power_info *si_pi = si_get_pi(rdev);
PPSMC_Result ret;
 
ret = si_send_msg_to_smc(rdev, PPSMC_StartFanControl);
if (ret == PPSMC_Result_OK)
if (ret == PPSMC_Result_OK) {
si_pi->fan_is_controlled_by_smc = true;
return 0;
else
} else {
return -EINVAL;
}
}
 
static int si_fan_ctrl_stop_smc_fan_control(struct radeon_device *rdev)
{
struct si_power_info *si_pi = si_get_pi(rdev);
PPSMC_Result ret;
 
ret = si_send_msg_to_smc(rdev, PPSMC_StopFanControl);
if (ret == PPSMC_Result_OK)
 
if (ret == PPSMC_Result_OK) {
si_pi->fan_is_controlled_by_smc = false;
return 0;
else
} else {
return -EINVAL;
}
}
 
#if 0
static int si_fan_ctrl_get_fan_speed_percent(struct radeon_device *rdev,
int si_fan_ctrl_get_fan_speed_percent(struct radeon_device *rdev,
u32 *speed)
{
u32 duty, duty100;
6019,9 → 6161,10
return 0;
}
 
static int si_fan_ctrl_set_fan_speed_percent(struct radeon_device *rdev,
int si_fan_ctrl_set_fan_speed_percent(struct radeon_device *rdev,
u32 speed)
{
struct si_power_info *si_pi = si_get_pi(rdev);
u32 tmp;
u32 duty, duty100;
u64 tmp64;
6029,12 → 6172,12
if (rdev->pm.no_fan)
return -ENOENT;
 
if (si_pi->fan_is_controlled_by_smc)
return -EINVAL;
 
if (speed > 100)
return -EINVAL;
 
if (rdev->pm.dpm.fan.ucode_fan_control)
si_fan_ctrl_stop_smc_fan_control(rdev);
 
duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
 
if (duty100 == 0)
6048,11 → 6191,38
tmp |= FDO_STATIC_DUTY(duty);
WREG32(CG_FDO_CTRL0, tmp);
 
si_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC);
return 0;
}
 
/*
 * Select the fan control mode.  A non-zero @mode forces that static
 * mode (taking control away from the SMC first if it had it); zero
 * returns the fan to automatic management.
 */
void si_fan_ctrl_set_mode(struct radeon_device *rdev, u32 mode)
{
	if (!mode) {
		/* restart auto-manage */
		if (rdev->pm.dpm.fan.ucode_fan_control)
			si_thermal_start_smc_fan_control(rdev);
		else
			si_fan_ctrl_set_default_mode(rdev);
		return;
	}

	/* manual mode: stop auto-manage before forcing the static mode */
	if (rdev->pm.dpm.fan.ucode_fan_control)
		si_fan_ctrl_stop_smc_fan_control(rdev);
	si_fan_ctrl_set_static_mode(rdev, mode);
}
 
/*
 * Report the current fan PWM mode from CG_FDO_CTRL2, or 0 while the
 * SMC firmware owns the fan (the register mode is not meaningful then).
 */
u32 si_fan_ctrl_get_mode(struct radeon_device *rdev)
{
	struct si_power_info *si_pi = si_get_pi(rdev);

	if (si_pi->fan_is_controlled_by_smc)
		return 0;

	return (RREG32(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK) >> FDO_PWM_MODE_SHIFT;
}
 
#if 0
static int si_fan_ctrl_get_fan_speed_rpm(struct radeon_device *rdev,
u32 *speed)
{
6464,6 → 6634,7
return ret;
}
ni_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);
si_set_vce_clock(rdev, new_ps, old_ps);
if (eg_pi->pcie_performance_request)
si_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps);
ret = si_set_power_state_conditionally_enable_ulv(rdev, new_ps);
6499,7 → 6670,7
ni_update_current_ps(rdev, new_ps);
}
 
 
#if 0
void si_dpm_reset_asic(struct radeon_device *rdev)
{
si_restrict_performance_levels_before_switch(rdev);
6506,6 → 6677,7
si_disable_ulv(rdev);
si_set_boot_state(rdev);
}
#endif
 
void si_dpm_display_configuration_changed(struct radeon_device *rdev)
{
6709,6 → 6881,21
power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
}
rdev->pm.dpm.num_ps = state_array->ucNumEntries;
 
/* fill in the vce power states */
for (i = 0; i < RADEON_MAX_VCE_LEVELS; i++) {
u32 sclk, mclk;
clock_array_index = rdev->pm.dpm.vce_states[i].clk_idx;
clock_info = (union pplib_clock_info *)
&clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
sclk = le16_to_cpu(clock_info->si.usEngineClockLow);
sclk |= clock_info->si.ucEngineClockHigh << 16;
mclk = le16_to_cpu(clock_info->si.usMemoryClockLow);
mclk |= clock_info->si.ucMemoryClockHigh << 16;
rdev->pm.dpm.vce_states[i].sclk = sclk;
rdev->pm.dpm.vce_states[i].mclk = mclk;
}
 
return 0;
}
 
6753,10 → 6940,11
if (ret)
return ret;
 
ret = si_parse_power_table(rdev);
ret = r600_parse_extended_power_table(rdev);
if (ret)
return ret;
ret = r600_parse_extended_power_table(rdev);
 
ret = si_parse_power_table(rdev);
if (ret)
return ret;
 
6873,7 → 7061,6
rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
 
si_pi->fan_ctrl_is_in_default_mode = true;
rdev->pm.dpm.fan.ucode_fan_control = false;
 
return 0;
}
6911,3 → 7098,39
current_index, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1);
}
}
 
/*
 * Return the engine clock of the currently active performance level,
 * or 0 when the hardware-reported level index is out of range for the
 * current power state.
 */
u32 si_dpm_get_current_sclk(struct radeon_device *rdev)
{
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct ni_ps *ps = ni_get_ps(&eg_pi->current_rps);
	u32 idx;

	/* hardware reports which performance level is active right now */
	idx = (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_INDEX_MASK) >>
	      CURRENT_STATE_INDEX_SHIFT;

	if (idx >= ps->performance_level_count)
		return 0;

	return ps->performance_levels[idx].sclk;
}
 
/*
 * Return the memory clock of the currently active performance level,
 * or 0 when the hardware-reported level index is out of range for the
 * current power state.
 */
u32 si_dpm_get_current_mclk(struct radeon_device *rdev)
{
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct ni_ps *ps = ni_get_ps(&eg_pi->current_rps);
	u32 idx;

	/* hardware reports which performance level is active right now */
	idx = (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_INDEX_MASK) >>
	      CURRENT_STATE_INDEX_SHIFT;

	if (idx >= ps->performance_level_count)
		return 0;

	return ps->performance_levels[idx].mclk;
}