Subversion Repositories Kolibri OS

Compare Revisions

Regard whitespace Rev 5077 → Rev 5078

/drivers/video/drm/radeon/sumo_dpm.c
0,0 → 1,1912
/*
* Copyright 2012 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
 
#include "drmP.h"
#include "radeon.h"
#include "sumod.h"
#include "r600_dpm.h"
#include "cypress_dpm.h"
#include "sumo_dpm.h"
#include <linux/seq_file.h>
 
#define SUMO_MAX_DEEPSLEEP_DIVIDER_ID 5
#define SUMO_MINIMUM_ENGINE_CLOCK 800
#define BOOST_DPM_LEVEL 7
 
/* Default up-trend coefficients (UTC), one per thermal-throttle level;
 * written into CG_FFCT_n by sumo_program_tp(). */
static const u32 sumo_utc[SUMO_PM_NUMBER_OF_TC] =
{
	SUMO_UTC_DFLT_00,
	SUMO_UTC_DFLT_01,
	SUMO_UTC_DFLT_02,
	SUMO_UTC_DFLT_03,
	SUMO_UTC_DFLT_04,
	SUMO_UTC_DFLT_05,
	SUMO_UTC_DFLT_06,
	SUMO_UTC_DFLT_07,
	SUMO_UTC_DFLT_08,
	SUMO_UTC_DFLT_09,
	SUMO_UTC_DFLT_10,
	SUMO_UTC_DFLT_11,
	SUMO_UTC_DFLT_12,
	SUMO_UTC_DFLT_13,
	SUMO_UTC_DFLT_14,
};
 
/* Default down-trend coefficients (DTC), one per thermal-throttle level;
 * written into CG_FFCT_n by sumo_program_tp(). */
static const u32 sumo_dtc[SUMO_PM_NUMBER_OF_TC] =
{
	SUMO_DTC_DFLT_00,
	SUMO_DTC_DFLT_01,
	SUMO_DTC_DFLT_02,
	SUMO_DTC_DFLT_03,
	SUMO_DTC_DFLT_04,
	SUMO_DTC_DFLT_05,
	SUMO_DTC_DFLT_06,
	SUMO_DTC_DFLT_07,
	SUMO_DTC_DFLT_08,
	SUMO_DTC_DFLT_09,
	SUMO_DTC_DFLT_10,
	SUMO_DTC_DFLT_11,
	SUMO_DTC_DFLT_12,
	SUMO_DTC_DFLT_13,
	SUMO_DTC_DFLT_14,
};
 
/* Fetch the sumo-specific state hung off a generic radeon power state. */
static struct sumo_ps *sumo_get_ps(struct radeon_ps *rps)
{
	return (struct sumo_ps *)rps->ps_priv;
}
 
/* Fetch the driver-private sumo power info stored in rdev->pm.dpm. */
struct sumo_power_info *sumo_get_pi(struct radeon_device *rdev)
{
	return (struct sumo_power_info *)rdev->pm.dpm.priv;
}
 
/* Enable or disable dynamic gfx clock gating.  On disable, the clock is
 * briefly forced on and then released before the final read-back. */
static void sumo_gfx_clockgating_enable(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32_P(SCLK_PWRMGT_CNTL, DYN_GFX_CLK_OFF_EN, ~DYN_GFX_CLK_OFF_EN);
	else {
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_GFX_CLK_OFF_EN);
		/* pulse GFX_CLK_FORCE_ON: set then clear */
		WREG32_P(SCLK_PWRMGT_CNTL, GFX_CLK_FORCE_ON, ~GFX_CLK_FORCE_ON);
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~GFX_CLK_FORCE_ON);
		/* read-back, value discarded — presumably flushes the writes;
		 * NOTE(review): confirm against hw docs */
		RREG32(GB_ADDR_CONFIG);
	}
}
 
#define CGCG_CGTT_LOCAL0_MASK 0xE5BFFFFF
#define CGCG_CGTT_LOCAL1_MASK 0xEFFF07FF
 
/* Enable or disable medium-grain clock gating by rewriting only the bits
 * covered by the CGCG_CGTT_LOCAL* masks; bits outside the masks keep
 * their current values. */
static void sumo_mg_clockgating_enable(struct radeon_device *rdev, bool enable)
{
	u32 local0;
	u32 local1;

	local0 = RREG32(CG_CGTT_LOCAL_0);
	local1 = RREG32(CG_CGTT_LOCAL_1);

	if (enable) {
		/* clear the masked bits (0 = gating enabled) */
		WREG32(CG_CGTT_LOCAL_0, (0 & CGCG_CGTT_LOCAL0_MASK) | (local0 & ~CGCG_CGTT_LOCAL0_MASK) );
		WREG32(CG_CGTT_LOCAL_1, (0 & CGCG_CGTT_LOCAL1_MASK) | (local1 & ~CGCG_CGTT_LOCAL1_MASK) );
	} else {
		/* set the masked bits to override/disable gating */
		WREG32(CG_CGTT_LOCAL_0, (0xFFFFFFFF & CGCG_CGTT_LOCAL0_MASK) | (local0 & ~CGCG_CGTT_LOCAL0_MASK) );
		WREG32(CG_CGTT_LOCAL_1, (0xFFFFCFFF & CGCG_CGTT_LOCAL1_MASK) | (local1 & ~CGCG_CGTT_LOCAL1_MASK) );
	}
}
 
/* Program the GFX idle timer (CG_GIT) from the default interval scaled
 * by the reference clock.  Only the computed period p is used; u is a
 * by-product of r600_calculate_u_and_p(). */
static void sumo_program_git(struct radeon_device *rdev)
{
	u32 p, u;
	u32 xclk = radeon_get_xclk(rdev);

	r600_calculate_u_and_p(SUMO_GICST_DFLT,
			       xclk, 16, &p, &u);

	WREG32_P(CG_GIT, CG_GICST(p), ~CG_GICST_MASK);
}
 
/* Program the GFX clock gating coordination register (CG_GCOOR). */
static void sumo_program_grsd(struct radeon_device *rdev)
{
	u32 p, u;
	u32 xclk = radeon_get_xclk(rdev);
	u32 grs = 256 * 25 / 100; /* 25% of 256 = 64 */

	r600_calculate_u_and_p(1, xclk, 14, &p, &u);

	WREG32(CG_GCOOR, PHC(grs) | SDC(p) | SU(u));
}
 
/* One-time setup for gfx clock gating: idle timer + coordination regs. */
void sumo_gfx_clockgating_initialize(struct radeon_device *rdev)
{
	sumo_program_git(rdev);
	sumo_program_grsd(rdev);
}
 
/* One-time setup of the gfx power gating state machine.  The sequence
 * programs the RCU power gating registers in three passes (PGS(1),
 * PGS(4), PGS(5)), each followed by sumo_smu_pg_init(), with PALM
 * (Wrestler) getting different timing constants than other chips.
 * The register write order is hardware-mandated — do not reorder. */
static void sumo_gfx_powergating_initialize(struct radeon_device *rdev)
{
	u32 rcu_pwr_gating_cntl;
	u32 p, u;
	u32 p_c, p_p, d_p;	/* power-up/down cycle and pulse counts */
	u32 r_t, i_t;		/* reset and isolation times */
	u32 xclk = radeon_get_xclk(rdev);

	/* chip-specific timing constants */
	if (rdev->family == CHIP_PALM) {
		p_c = 4;
		d_p = 10;
		r_t = 10;
		i_t = 4;
		p_p = 50 + 1000/200 + 6 * 32;
	} else {
		p_c = 16;
		d_p = 50;
		r_t = 50;
		i_t = 50;
		p_p = 113;
	}

	WREG32(CG_SCRATCH2, 0x01B60A17);

	r600_calculate_u_and_p(SUMO_GFXPOWERGATINGT_DFLT,
			       xclk, 16, &p, &u);

	WREG32_P(CG_PWR_GATING_CNTL, PGP(p) | PGU(u),
		 ~(PGP_MASK | PGU_MASK));

	r600_calculate_u_and_p(SUMO_VOLTAGEDROPT_DFLT,
			       xclk, 16, &p, &u);

	WREG32_P(CG_CG_VOLTAGE_CNTL, PGP(p) | PGU(u),
		 ~(PGP_MASK | PGU_MASK));

	/* power gating sequencer ordering */
	if (rdev->family == CHIP_PALM) {
		WREG32_RCU(RCU_PWR_GATING_SEQ0, 0x10103210);
		WREG32_RCU(RCU_PWR_GATING_SEQ1, 0x10101010);
	} else {
		WREG32_RCU(RCU_PWR_GATING_SEQ0, 0x76543210);
		WREG32_RCU(RCU_PWR_GATING_SEQ1, 0xFEDCBA98);
	}

	/* pass 1: PGS(1) */
	rcu_pwr_gating_cntl = RREG32_RCU(RCU_PWR_GATING_CNTL);
	rcu_pwr_gating_cntl &=
		~(RSVD_MASK | PCV_MASK | PGS_MASK);
	rcu_pwr_gating_cntl |= PCV(p_c) | PGS(1) | PWR_GATING_EN;
	if (rdev->family == CHIP_PALM) {
		rcu_pwr_gating_cntl &= ~PCP_MASK;
		rcu_pwr_gating_cntl |= PCP(0x77);
	}
	WREG32_RCU(RCU_PWR_GATING_CNTL, rcu_pwr_gating_cntl);

	rcu_pwr_gating_cntl = RREG32_RCU(RCU_PWR_GATING_CNTL_2);
	rcu_pwr_gating_cntl &= ~(MPPU_MASK | MPPD_MASK);
	rcu_pwr_gating_cntl |= MPPU(p_p) | MPPD(50);
	WREG32_RCU(RCU_PWR_GATING_CNTL_2, rcu_pwr_gating_cntl);

	rcu_pwr_gating_cntl = RREG32_RCU(RCU_PWR_GATING_CNTL_3);
	rcu_pwr_gating_cntl &= ~(DPPU_MASK | DPPD_MASK);
	rcu_pwr_gating_cntl |= DPPU(d_p) | DPPD(50);
	WREG32_RCU(RCU_PWR_GATING_CNTL_3, rcu_pwr_gating_cntl);

	rcu_pwr_gating_cntl = RREG32_RCU(RCU_PWR_GATING_CNTL_4);
	rcu_pwr_gating_cntl &= ~(RT_MASK | IT_MASK);
	rcu_pwr_gating_cntl |= RT(r_t) | IT(i_t);
	WREG32_RCU(RCU_PWR_GATING_CNTL_4, rcu_pwr_gating_cntl);

	if (rdev->family == CHIP_PALM)
		WREG32_RCU(RCU_PWR_GATING_CNTL_5, 0xA02);

	sumo_smu_pg_init(rdev);

	/* pass 2: PGS(4) */
	rcu_pwr_gating_cntl = RREG32_RCU(RCU_PWR_GATING_CNTL);
	rcu_pwr_gating_cntl &=
		~(RSVD_MASK | PCV_MASK | PGS_MASK);
	rcu_pwr_gating_cntl |= PCV(p_c) | PGS(4) | PWR_GATING_EN;
	if (rdev->family == CHIP_PALM) {
		rcu_pwr_gating_cntl &= ~PCP_MASK;
		rcu_pwr_gating_cntl |= PCP(0x77);
	}
	WREG32_RCU(RCU_PWR_GATING_CNTL, rcu_pwr_gating_cntl);

	if (rdev->family == CHIP_PALM) {
		rcu_pwr_gating_cntl = RREG32_RCU(RCU_PWR_GATING_CNTL_2);
		rcu_pwr_gating_cntl &= ~(MPPU_MASK | MPPD_MASK);
		rcu_pwr_gating_cntl |= MPPU(113) | MPPD(50);
		WREG32_RCU(RCU_PWR_GATING_CNTL_2, rcu_pwr_gating_cntl);

		rcu_pwr_gating_cntl = RREG32_RCU(RCU_PWR_GATING_CNTL_3);
		rcu_pwr_gating_cntl &= ~(DPPU_MASK | DPPD_MASK);
		rcu_pwr_gating_cntl |= DPPU(16) | DPPD(50);
		WREG32_RCU(RCU_PWR_GATING_CNTL_3, rcu_pwr_gating_cntl);
	}

	sumo_smu_pg_init(rdev);

	/* pass 3: PGS(5) */
	rcu_pwr_gating_cntl = RREG32_RCU(RCU_PWR_GATING_CNTL);
	rcu_pwr_gating_cntl &=
		~(RSVD_MASK | PCV_MASK | PGS_MASK);
	rcu_pwr_gating_cntl |= PGS(5) | PWR_GATING_EN;

	if (rdev->family == CHIP_PALM) {
		rcu_pwr_gating_cntl |= PCV(4);
		rcu_pwr_gating_cntl &= ~PCP_MASK;
		rcu_pwr_gating_cntl |= PCP(0x77);
	} else
		rcu_pwr_gating_cntl |= PCV(11);
	WREG32_RCU(RCU_PWR_GATING_CNTL, rcu_pwr_gating_cntl);

	if (rdev->family == CHIP_PALM) {
		rcu_pwr_gating_cntl = RREG32_RCU(RCU_PWR_GATING_CNTL_2);
		rcu_pwr_gating_cntl &= ~(MPPU_MASK | MPPD_MASK);
		rcu_pwr_gating_cntl |= MPPU(113) | MPPD(50);
		WREG32_RCU(RCU_PWR_GATING_CNTL_2, rcu_pwr_gating_cntl);

		rcu_pwr_gating_cntl = RREG32_RCU(RCU_PWR_GATING_CNTL_3);
		rcu_pwr_gating_cntl &= ~(DPPU_MASK | DPPD_MASK);
		rcu_pwr_gating_cntl |= DPPU(22) | DPPD(50);
		WREG32_RCU(RCU_PWR_GATING_CNTL_3, rcu_pwr_gating_cntl);
	}

	sumo_smu_pg_init(rdev);
}
 
/* Enable or disable dynamic gfx power-down. */
static void sumo_gfx_powergating_enable(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32_P(CG_PWR_GATING_CNTL, DYN_PWR_DOWN_EN, ~DYN_PWR_DOWN_EN);
	else {
		WREG32_P(CG_PWR_GATING_CNTL, 0, ~DYN_PWR_DOWN_EN);
		/* read-back, value discarded — presumably flushes the write;
		 * NOTE(review): confirm against hw docs */
		RREG32(GB_ADDR_CONFIG);
	}
}
 
/* Enable all clock/power gating features selected in the power info.
 * Initialization is done before the corresponding enable — keep the
 * ordering.  Always returns 0. */
static int sumo_enable_clock_power_gating(struct radeon_device *rdev)
{
	struct sumo_power_info *pi = sumo_get_pi(rdev);

	if (pi->enable_gfx_clock_gating)
		sumo_gfx_clockgating_initialize(rdev);
	if (pi->enable_gfx_power_gating)
		sumo_gfx_powergating_initialize(rdev);
	if (pi->enable_mg_clock_gating)
		sumo_mg_clockgating_enable(rdev, true);
	if (pi->enable_gfx_clock_gating)
		sumo_gfx_clockgating_enable(rdev, true);
	if (pi->enable_gfx_power_gating)
		sumo_gfx_powergating_enable(rdev, true);

	return 0;
}
 
/* Disable the clock/power gating features selected in the power info,
 * in the reverse order of the enable path. */
static void sumo_disable_clock_power_gating(struct radeon_device *rdev)
{
	struct sumo_power_info *pi = sumo_get_pi(rdev);

	if (pi->enable_gfx_clock_gating)
		sumo_gfx_clockgating_enable(rdev, false);
	if (pi->enable_gfx_power_gating)
		sumo_gfx_powergating_enable(rdev, false);
	if (pi->enable_mg_clock_gating)
		sumo_mg_clockgating_enable(rdev, false);
}
 
/* Derive the bootup/performance sampling period fields (dsp/psp) from
 * the highest engine clock of the target state.  asi and pasi are
 * currently computed identically. */
static void sumo_calculate_bsp(struct radeon_device *rdev,
			       u32 high_clk)
{
	struct sumo_power_info *pi = sumo_get_pi(rdev);
	u32 xclk = radeon_get_xclk(rdev);

	pi->pasi = 65535 * 100 / high_clk;
	pi->asi = 65535 * 100 / high_clk;

	r600_calculate_u_and_p(pi->asi,
			       xclk, 16, &pi->bsp, &pi->bsu);

	r600_calculate_u_and_p(pi->pasi,
			       xclk, 16, &pi->pbsp, &pi->pbsu);

	pi->dsp = BSP(pi->bsp) | BSU(pi->bsu);
	pi->psp = BSP(pi->pbsp) | BSU(pi->pbsu);
}
 
/* Seed level 0's sampling period register with the performance value. */
static void sumo_init_bsp(struct radeon_device *rdev)
{
	struct sumo_power_info *pi = sumo_get_pi(rdev);

	WREG32(CG_BSP_0, pi->psp);
}
 
 
/* Program per-level sampling periods for the requested state: dsp for
 * all but the highest level, psp for the highest (and the boost level
 * when the state is a boost state). */
static void sumo_program_bsp(struct radeon_device *rdev,
			     struct radeon_ps *rps)
{
	struct sumo_power_info *pi = sumo_get_pi(rdev);
	struct sumo_ps *ps = sumo_get_ps(rps);
	u32 i;
	u32 highest_engine_clock = ps->levels[ps->num_levels - 1].sclk;

	if (ps->flags & SUMO_POWERSTATE_FLAGS_BOOST_STATE)
		highest_engine_clock = pi->boost_pl.sclk;

	sumo_calculate_bsp(rdev, highest_engine_clock);

	for (i = 0; i < ps->num_levels - 1; i++)
		WREG32(CG_BSP_0 + (i * 4), pi->dsp);

	/* i == ps->num_levels - 1 here: highest level gets psp */
	WREG32(CG_BSP_0 + (i * 4), pi->psp);

	if (ps->flags & SUMO_POWERSTATE_FLAGS_BOOST_STATE)
		WREG32(CG_BSP_0 + (BOOST_DPM_LEVEL * 4), pi->psp);
}
 
/* Write @value to the CG_AT_n register selected by @index (0-7);
 * out-of-range indices are ignored. */
static void sumo_write_at(struct radeon_device *rdev,
			  u32 index, u32 value)
{
	switch (index) {
	case 0:
		WREG32(CG_AT_0, value);
		break;
	case 1:
		WREG32(CG_AT_1, value);
		break;
	case 2:
		WREG32(CG_AT_2, value);
		break;
	case 3:
		WREG32(CG_AT_3, value);
		break;
	case 4:
		WREG32(CG_AT_4, value);
		break;
	case 5:
		WREG32(CG_AT_5, value);
		break;
	case 6:
		WREG32(CG_AT_6, value);
		break;
	case 7:
		WREG32(CG_AT_7, value);
		break;
	default:
		break;
	}
}
 
/* Program per-level activity thresholds (CG_AT_n) for the requested
 * state from the default R/L percentage tables, using pasi for the
 * highest level (and the boost level) and asi for the rest. */
static void sumo_program_at(struct radeon_device *rdev,
			    struct radeon_ps *rps)
{
	struct sumo_power_info *pi = sumo_get_pi(rdev);
	struct sumo_ps *ps = sumo_get_ps(rps);
	u32 asi;
	u32 i;
	u32 m_a;
	u32 a_t;
	u32 r[SUMO_MAX_HARDWARE_POWERLEVELS];	/* raise thresholds, % */
	u32 l[SUMO_MAX_HARDWARE_POWERLEVELS];	/* lower thresholds, % */

	r[0] = SUMO_R_DFLT0;
	r[1] = SUMO_R_DFLT1;
	r[2] = SUMO_R_DFLT2;
	r[3] = SUMO_R_DFLT3;
	r[4] = SUMO_R_DFLT4;

	l[0] = SUMO_L_DFLT0;
	l[1] = SUMO_L_DFLT1;
	l[2] = SUMO_L_DFLT2;
	l[3] = SUMO_L_DFLT3;
	l[4] = SUMO_L_DFLT4;

	for (i = 0; i < ps->num_levels; i++) {
		asi = (i == ps->num_levels - 1) ? pi->pasi : pi->asi;

		m_a = asi * ps->levels[i].sclk / 100;

		a_t = CG_R(m_a * r[i] / 100) | CG_L(m_a * l[i] / 100);

		sumo_write_at(rdev, i, a_t);
	}

	if (ps->flags & SUMO_POWERSTATE_FLAGS_BOOST_STATE) {
		asi = pi->pasi;

		m_a = asi * pi->boost_pl.sclk / 100;

		/* boost reuses the highest level's R/L percentages */
		a_t = CG_R(m_a * r[ps->num_levels - 1] / 100) |
		      CG_L(m_a * l[ps->num_levels - 1] / 100);

		sumo_write_at(rdev, BOOST_DPM_LEVEL, a_t);
	}
}
 
/* Program thermal-throttle coefficients and the trend-direction mode.
 * td is fixed to R600_TD_DFLT here, so which of the branches below run
 * depends on what that default expands to — kept generic for clarity. */
static void sumo_program_tp(struct radeon_device *rdev)
{
	int i;
	enum r600_td td = R600_TD_DFLT;

	for (i = 0; i < SUMO_PM_NUMBER_OF_TC; i++) {
		WREG32_P(CG_FFCT_0 + (i * 4), UTC_0(sumo_utc[i]), ~UTC_0_MASK);
		WREG32_P(CG_FFCT_0 + (i * 4), DTC_0(sumo_dtc[i]), ~DTC_0_MASK);
	}

	if (td == R600_TD_AUTO)
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_FORCE_TREND_SEL);
	else
		WREG32_P(SCLK_PWRMGT_CNTL, FIR_FORCE_TREND_SEL, ~FIR_FORCE_TREND_SEL);

	if (td == R600_TD_UP)
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_TREND_MODE);

	if (td == R600_TD_DOWN)
		WREG32_P(SCLK_PWRMGT_CNTL, FIR_TREND_MODE, ~FIR_TREND_MODE);
}
 
/* Program the voltage/frequency response control register. */
void sumo_program_vc(struct radeon_device *rdev, u32 vrc)
{
	WREG32(CG_FTV, vrc);
}
 
/* Clear the voltage/frequency response control register. */
void sumo_clear_vc(struct radeon_device *rdev)
{
	WREG32(CG_FTV, 0);
}
 
/* Program the state-sampling period (CG_SSP) from the default interval
 * scaled by the reference clock. */
void sumo_program_sstp(struct radeon_device *rdev)
{
	u32 p, u;
	u32 xclk = radeon_get_xclk(rdev);

	r600_calculate_u_and_p(SUMO_SST_DFLT,
			       xclk, 16, &p, &u);

	WREG32(CG_SSP, SSTU(u) | SST(p));
}
 
/* Set the sclk post-divider for DPM level @index.  Four level fields
 * are packed per CG_SCLK_DPM_CTRL register. */
static void sumo_set_divider_value(struct radeon_device *rdev,
				   u32 index, u32 divider)
{
	u32 reg_index = index / 4;
	u32 field_index = index % 4;
	u32 reg = CG_SCLK_DPM_CTRL + (reg_index * 4);

	switch (field_index) {
	case 0:
		WREG32_P(reg, SCLK_FSTATE_0_DIV(divider), ~SCLK_FSTATE_0_DIV_MASK);
		break;
	case 1:
		WREG32_P(reg, SCLK_FSTATE_1_DIV(divider), ~SCLK_FSTATE_1_DIV_MASK);
		break;
	case 2:
		WREG32_P(reg, SCLK_FSTATE_2_DIV(divider), ~SCLK_FSTATE_2_DIV_MASK);
		break;
	case 3:
		WREG32_P(reg, SCLK_FSTATE_3_DIV(divider), ~SCLK_FSTATE_3_DIV_MASK);
		break;
	}
}
 
/* Set the deep-sleep divider (3-bit field per level) for DPM level
 * @index; no-op unless sclk deep sleep is enabled. */
static void sumo_set_ds_dividers(struct radeon_device *rdev,
				 u32 index, u32 divider)
{
	struct sumo_power_info *pi = sumo_get_pi(rdev);

	if (pi->enable_sclk_ds) {
		u32 dpm_ctrl = RREG32(CG_SCLK_DPM_CTRL_6);

		dpm_ctrl &= ~(0x7 << (index * 3));
		dpm_ctrl |= (divider << (index * 3));
		WREG32(CG_SCLK_DPM_CTRL_6, dpm_ctrl);
	}
}
 
/* Set the spread-spectrum divider (3-bit field per level) for DPM level
 * @index.  NOTE(review): gated on enable_sclk_ds like the deep-sleep
 * path — this matches upstream, but verify it isn't meant to be a
 * separate ss enable flag. */
static void sumo_set_ss_dividers(struct radeon_device *rdev,
				 u32 index, u32 divider)
{
	struct sumo_power_info *pi = sumo_get_pi(rdev);

	if (pi->enable_sclk_ds) {
		u32 dpm_ctrl = RREG32(CG_SCLK_DPM_CTRL_11);

		dpm_ctrl &= ~(0x7 << (index * 3));
		dpm_ctrl |= (divider << (index * 3));
		WREG32(CG_SCLK_DPM_CTRL_11, dpm_ctrl);
	}
}
 
/* Set the 2-bit voltage id for DPM level @index in CG_DPM_VOLTAGE_CNTL. */
static void sumo_set_vid(struct radeon_device *rdev, u32 index, u32 vid)
{
	u32 voltage_cntl = RREG32(CG_DPM_VOLTAGE_CNTL);

	voltage_cntl &= ~(DPM_STATE0_LEVEL_MASK << (index * 2));
	voltage_cntl |= (vid << (DPM_STATE0_LEVEL_SHIFT + index * 2));
	WREG32(CG_DPM_VOLTAGE_CNTL, voltage_cntl);
}
 
/* Set the per-level "GNB slow" flag (1-bit field per level); forced to
 * 1 when the driver's NB p-state policy is disabled. */
static void sumo_set_allos_gnb_slow(struct radeon_device *rdev, u32 index, u32 gnb_slow)
{
	struct sumo_power_info *pi = sumo_get_pi(rdev);
	u32 temp = gnb_slow;
	u32 cg_sclk_dpm_ctrl_3;

	if (pi->driver_nbps_policy_disable)
		temp = 1;

	cg_sclk_dpm_ctrl_3 = RREG32(CG_SCLK_DPM_CTRL_3);
	cg_sclk_dpm_ctrl_3 &= ~(GNB_SLOW_FSTATE_0_MASK << index);
	cg_sclk_dpm_ctrl_3 |= (temp << (GNB_SLOW_FSTATE_0_SHIFT + index));

	WREG32(CG_SCLK_DPM_CTRL_3, cg_sclk_dpm_ctrl_3);
}
 
/* Program one hardware power level: engine clock divider, voltage id,
 * spread-spectrum / deep-sleep dividers (with deep sleep toggled to
 * match), GNB-slow flag and, when boost is enabled, the TDP limit.
 * Returns silently if the clock dividers can't be computed. */
static void sumo_program_power_level(struct radeon_device *rdev,
				     struct sumo_pl *pl, u32 index)
{
	struct sumo_power_info *pi = sumo_get_pi(rdev);
	int ret;
	struct atom_clock_dividers dividers;
	u32 ds_en = RREG32(DEEP_SLEEP_CNTL) & ENABLE_DS;

	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
					     pl->sclk, false, &dividers);
	if (ret)
		return;

	sumo_set_divider_value(rdev, index, dividers.post_div);

	sumo_set_vid(rdev, index, pl->vddc_index);

	if (pl->ss_divider_index == 0 || pl->ds_divider_index == 0) {
		/* this level can't deep-sleep: make sure DS is off */
		if (ds_en)
			WREG32_P(DEEP_SLEEP_CNTL, 0, ~ENABLE_DS);
	} else {
		sumo_set_ss_dividers(rdev, index, pl->ss_divider_index);
		sumo_set_ds_dividers(rdev, index, pl->ds_divider_index);

		if (!ds_en)
			WREG32_P(DEEP_SLEEP_CNTL, ENABLE_DS, ~ENABLE_DS);
	}

	sumo_set_allos_gnb_slow(rdev, index, pl->allow_gnb_slow);

	if (pi->enable_boost)
		sumo_set_tdp_limit(rdev, index, pl->sclk_dpm_tdp_limit);
}
 
/* Mark DPM level @index valid or invalid.  Four level fields are
 * packed per CG_SCLK_DPM_CTRL register. */
static void sumo_power_level_enable(struct radeon_device *rdev, u32 index, bool enable)
{
	u32 reg_index = index / 4;
	u32 field_index = index % 4;
	u32 reg = CG_SCLK_DPM_CTRL + (reg_index * 4);

	switch (field_index) {
	case 0:
		WREG32_P(reg, enable ? SCLK_FSTATE_0_VLD : 0, ~SCLK_FSTATE_0_VLD);
		break;
	case 1:
		WREG32_P(reg, enable ? SCLK_FSTATE_1_VLD : 0, ~SCLK_FSTATE_1_VLD);
		break;
	case 2:
		WREG32_P(reg, enable ? SCLK_FSTATE_2_VLD : 0, ~SCLK_FSTATE_2_VLD);
		break;
	case 3:
		WREG32_P(reg, enable ? SCLK_FSTATE_3_VLD : 0, ~SCLK_FSTATE_3_VLD);
		break;
	}
}
 
/* Report whether sclk DPM is currently enabled in hardware. */
static bool sumo_dpm_enabled(struct radeon_device *rdev)
{
	return (RREG32(CG_SCLK_DPM_CTRL_3) & DPM_SCLK_ENABLE) != 0;
}
 
/* Turn on sclk DPM. */
static void sumo_start_dpm(struct radeon_device *rdev)
{
	WREG32_P(CG_SCLK_DPM_CTRL_3, DPM_SCLK_ENABLE, ~DPM_SCLK_ENABLE);
}
 
/* Turn off sclk DPM. */
static void sumo_stop_dpm(struct radeon_device *rdev)
{
	WREG32_P(CG_SCLK_DPM_CTRL_3, 0, ~DPM_SCLK_ENABLE);
}
 
/* Assert or deassert forced sclk state mode. */
static void sumo_set_forced_mode(struct radeon_device *rdev, bool enable)
{
	u32 bits = enable ? FORCE_SCLK_STATE_EN : 0;

	WREG32_P(CG_SCLK_DPM_CTRL_3, bits, ~FORCE_SCLK_STATE_EN);
}
 
/* Enable forced mode and busy-wait (up to rdev->usec_timeout us) for
 * the hardware to acknowledge via SCLK_OVERCLK_DETECT. */
static void sumo_set_forced_mode_enabled(struct radeon_device *rdev)
{
	int i;

	sumo_set_forced_mode(rdev, true);
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(CG_SCLK_STATUS) & SCLK_OVERCLK_DETECT)
			break;
		udelay(1);
	}
}
 
/* Busy-wait (bounded by rdev->usec_timeout per index) until both the
 * current sclk index and the current index report level 0. */
static void sumo_wait_for_level_0(struct radeon_device *rdev)
{
	int i;

	for (i = 0; i < rdev->usec_timeout; i++) {
		if ((RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURR_SCLK_INDEX_MASK) == 0)
			break;
		udelay(1);
	}
	for (i = 0; i < rdev->usec_timeout; i++) {
		if ((RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURR_INDEX_MASK) == 0)
			break;
		udelay(1);
	}
}
 
/* Disable forced sclk state mode (no acknowledgment wait). */
static void sumo_set_forced_mode_disabled(struct radeon_device *rdev)
{
	sumo_set_forced_mode(rdev, false);
}
 
/* Mark power level 0 as valid. */
static void sumo_enable_power_level_0(struct radeon_device *rdev)
{
	sumo_power_level_enable(rdev, 0, true);
}
 
/* If the requested state is a boost state, build pi->boost_pl from the
 * state's highest level, overriding clock/vid/TDP with the boost values
 * from the system info table. */
static void sumo_patch_boost_state(struct radeon_device *rdev,
				   struct radeon_ps *rps)
{
	struct sumo_power_info *pi = sumo_get_pi(rdev);
	struct sumo_ps *new_ps = sumo_get_ps(rps);

	if (new_ps->flags & SUMO_POWERSTATE_FLAGS_BOOST_STATE) {
		pi->boost_pl = new_ps->levels[new_ps->num_levels - 1];
		pi->boost_pl.sclk = pi->sys_info.boost_sclk;
		pi->boost_pl.vddc_index = pi->sys_info.boost_vid_2bit;
		pi->boost_pl.sclk_dpm_tdp_limit = pi->sys_info.sclk_dpm_tdp_limit_boost;
	}
}
 
/* Before a state switch: if we are leaving a forced-NBPS1 state for a
 * non-forced one, notify the SMU of the alt vddnb change.  old_rps may
 * be NULL (treated as not-forced). */
static void sumo_pre_notify_alt_vddnb_change(struct radeon_device *rdev,
					     struct radeon_ps *new_rps,
					     struct radeon_ps *old_rps)
{
	struct sumo_ps *new_ps = sumo_get_ps(new_rps);
	struct sumo_ps *old_ps = sumo_get_ps(old_rps);
	u32 nbps1_old = 0;
	u32 nbps1_new = 0;

	if (old_ps != NULL)
		nbps1_old = (old_ps->flags & SUMO_POWERSTATE_FLAGS_FORCE_NBPS1_STATE) ? 1 : 0;

	nbps1_new = (new_ps->flags & SUMO_POWERSTATE_FLAGS_FORCE_NBPS1_STATE) ? 1 : 0;

	if (nbps1_old == 1 && nbps1_new == 0)
		sumo_smu_notify_alt_vddnb_change(rdev, 0, 0);
}
 
static void sumo_post_notify_alt_vddnb_change(struct radeon_device *rdev,
struct radeon_ps *new_rps,
struct radeon_ps *old_rps)
{
struct sumo_ps *new_ps = sumo_get_ps(new_rps);
struct sumo_ps *old_ps = sumo_get_ps(old_rps);
u32 nbps1_old = 0;
u32 nbps1_new = 0;
 
if (old_ps != NULL)