/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include "drmP.h"
#include "radeon.h"
#include "radeon_ucode.h"
#include "cikd.h"
#include "r600_dpm.h"
#include "ci_dpm.h"
#include "atom.h"
#include <linux/seq_file.h>

#define MC_CG_ARB_FREQ_F0           0x0a
#define MC_CG_ARB_FREQ_F1           0x0b
#define MC_CG_ARB_FREQ_F2           0x0c
#define MC_CG_ARB_FREQ_F3           0x0d

#define SMC_RAM_END 0x40000

#define VOLTAGE_SCALE               4
#define VOLTAGE_VID_OFFSET_SCALE1    625
#define VOLTAGE_VID_OFFSET_SCALE2    100

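/*
 * Per-ASIC PowerTune default parameter sets, selected by PCI device ID in
 * ci_initialize_powertune_defaults() below.  The two 15-entry arrays are
 * copied into the SMU's BAPMTI_R and BAPMTI_RC tables by
 * ci_populate_bapm_parameters_in_dpm_table().
 */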
static const struct ci_pt_defaults defaults_hawaii_xt =
{
	1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
	{ 0x84,  0x0,   0x0,   0x7F,  0x0,   0x0,   0x5A,  0x60,  0x51,  0x8E,  0x79,  0x6B,  0x5F,  0x90,  0x79  },
	{ 0x1EA, 0x1EA, 0x1EA, 0x224, 0x224, 0x224, 0x24F, 0x24F, 0x24F, 0x28E, 0x28E, 0x28E, 0x2BC, 0x2BC, 0x2BC }
};

static const struct ci_pt_defaults defaults_hawaii_pro =
{
	1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
	{ 0x93,  0x0,   0x0,   0x97,  0x0,   0x0,   0x6B,  0x60,  0x51,  0x95,  0x79,  0x6B,  0x5F,  0x90,  0x79  },
	{ 0x1EA, 0x1EA, 0x1EA, 0x224, 0x224, 0x224, 0x24F, 0x24F, 0x24F, 0x28E, 0x28E, 0x28E, 0x2BC, 0x2BC, 0x2BC }
};

static const struct ci_pt_defaults defaults_bonaire_xt =
{
	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
	{ 0x79,  0x253, 0x25D, 0xAE,  0x72,  0x80,  0x83,  0x86,  0x6F,  0xC8,  0xC9,  0xC9,  0x2F,  0x4D,  0x61  },
	{ 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
};

static const struct ci_pt_defaults defaults_bonaire_pro =
{
	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062,
	{ 0x8C,  0x23F, 0x244, 0xA6,  0x83,  0x85,  0x86,  0x86,  0x83,  0xDB,  0xDB,  0xDA,  0x67,  0x60,  0x5F  },
	{ 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB }
};

static const struct ci_pt_defaults defaults_saturn_xt =
{
	1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
	{ 0x8C,  0x247, 0x249, 0xA6,  0x80,  0x81,  0x8B,  0x89,  0x86,  0xC9,  0xCA,  0xC9,  0x4D,  0x4D,  0x4D  },
	{ 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
};

static const struct ci_pt_defaults defaults_saturn_pro =
{
	1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x30000,
	{ 0x96,  0x21D, 0x23B, 0xA1,  0x85,  0x87,  0x83,  0x84,  0x81,  0xE6,  0xE6,  0xE6,  0x71,  0x6A,  0x6A  },
	{ 0x193, 0x19E, 0x19E, 0x1D2, 0x1DC, 0x1DC, 0x21A, 0x223, 0x223, 0x26E, 0x27E, 0x274, 0x2CF, 0x2D2, 0x2D2 }
};

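/*
 * DIDT (di/dt throttling) register programming table:
 * { offset, mask, shift, value, type }, terminated by an offset of
 * 0xFFFFFFFF.  All entries here live in the DIDT indirect register space
 * and are applied by ci_program_pt_config_registers().
 */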
static const struct ci_pt_config_reg didt_config_ci[] =
{
	{ 0x10, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x1, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x1, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x0, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x21, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x21, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x20, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x41, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x41, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x40, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x61, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x61, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x60, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0xFFFFFFFF }
};

extern u8 rv770_get_memory_module_index(struct radeon_device *rdev);
extern void btc_get_max_clock_from_voltage_dependency_table(struct radeon_clock_voltage_dependency_table *table,
							    u32 *max_clock);
extern int ni_copy_and_switch_arb_sets(struct radeon_device *rdev,
				       u32 arb_freq_src, u32 arb_freq_dest);
extern u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock);
extern u8 si_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode);
extern void si_trim_voltage_table_to_fit_state_table(struct radeon_device *rdev,
						     u32 max_voltage_steps,
						     struct atom_voltage_table *voltage_table);
extern void cik_enter_rlc_safe_mode(struct radeon_device *rdev);
extern void cik_exit_rlc_safe_mode(struct radeon_device *rdev);
extern int ci_mc_load_microcode(struct radeon_device *rdev);
extern void cik_update_cg(struct radeon_device *rdev,
			  u32 block, bool enable);

static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev,
					 struct atom_voltage_table_entry *voltage_table,
					 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd);
static int ci_set_power_limit(struct radeon_device *rdev, u32 n);
static int ci_set_overdrive_target_tdp(struct radeon_device *rdev,
				       u32 target_tdp);
static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate);

static struct ci_power_info *ci_get_pi(struct radeon_device *rdev)
{
	struct ci_power_info *pi = rdev->pm.dpm.priv;

	return pi;
}

static struct ci_ps *ci_get_ps(struct radeon_ps *rps)
{
	struct ci_ps *ps = rps->ps_priv;

	return ps;
}

static void ci_initialize_powertune_defaults(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	switch (rdev->pdev->device) {
	case 0x6649:
	case 0x6650:
	case 0x6651:
	case 0x6658:
	case 0x665C:
	case 0x665D:
	default:
		pi->powertune_defaults = &defaults_bonaire_xt;
		break;
	case 0x6640:
	case 0x6641:
	case 0x6646:
	case 0x6647:
		pi->powertune_defaults = &defaults_saturn_xt;
		break;
	case 0x67B8:
	case 0x67B0:
		pi->powertune_defaults = &defaults_hawaii_xt;
		break;
	case 0x67BA:
	case 0x67B1:
		pi->powertune_defaults = &defaults_hawaii_pro;
		break;
	case 0x67A0:
	case 0x67A1:
	case 0x67A2:
	case 0x67A8:
	case 0x67A9:
	case 0x67AA:
	case 0x67B9:
	case 0x67BE:
		pi->powertune_defaults = &defaults_bonaire_xt;
		break;
	}

	pi->dte_tj_offset = 0;

	pi->caps_power_containment = true;
	pi->caps_cac = false;
	pi->caps_sq_ramping = false;
	pi->caps_db_ramping = false;
	pi->caps_td_ramping = false;
	pi->caps_tcp_ramping = false;

	if (pi->caps_power_containment) {
		pi->caps_cac = true;
		pi->enable_bapm_feature = true;
		pi->enable_tdc_limit_feature = true;
		pi->enable_pkg_pwr_tracking_feature = true;
	}
}

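/*
 * Convert a VDDC value (in units of VOLTAGE_SCALE mV) to a VID code:
 * (6200 - 4 * vddc) / 25, i.e. 6.25 mV steps down from a 1.55 V ceiling.
 */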
static u8 ci_convert_to_vid(u16 vddc)
{
	return (6200 - (vddc * VOLTAGE_SCALE)) / 25;
}

static int ci_populate_bapm_vddc_vid_sidd(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
	u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
	u8 *hi2_vid = pi->smc_powertune_table.BapmVddCVidHiSidd2;
	u32 i;

	if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries == NULL)
		return -EINVAL;
	if (rdev->pm.dpm.dyn_state.cac_leakage_table.count > 8)
		return -EINVAL;
	if (rdev->pm.dpm.dyn_state.cac_leakage_table.count !=
	    rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count)
		return -EINVAL;

	for (i = 0; i < rdev->pm.dpm.dyn_state.cac_leakage_table.count; i++) {
		if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
			lo_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1);
			hi_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2);
			hi2_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3);
		} else {
			lo_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc);
			hi_vid[i] = ci_convert_to_vid((u16)rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage);
		}
	}
	return 0;
}

static int ci_populate_vddc_vid(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u8 *vid = pi->smc_powertune_table.VddCVid;
	u32 i;

	if (pi->vddc_voltage_table.count > 8)
		return -EINVAL;

	for (i = 0; i < pi->vddc_voltage_table.count; i++)
		vid[i] = ci_convert_to_vid(pi->vddc_voltage_table.entries[i].value);

	return 0;
}

static int ci_populate_svi_load_line(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;

	pi->smc_powertune_table.SviLoadLineEn = pt_defaults->svi_load_line_en;
	pi->smc_powertune_table.SviLoadLineVddC = pt_defaults->svi_load_line_vddc;
	pi->smc_powertune_table.SviLoadLineTrimVddC = 3;
	pi->smc_powertune_table.SviLoadLineOffsetVddC = 0;

	return 0;
}

static int ci_populate_tdc_limit(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	u16 tdc_limit;

	tdc_limit = rdev->pm.dpm.dyn_state.cac_tdp_table->tdc * 256;
	pi->smc_powertune_table.TDC_VDDC_PkgLimit = cpu_to_be16(tdc_limit);
	pi->smc_powertune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
		pt_defaults->tdc_vddc_throttle_release_limit_perc;
	pi->smc_powertune_table.TDC_MAWt = pt_defaults->tdc_mawt;

	return 0;
}

static int ci_populate_dw8(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	int ret;

	ret = ci_read_smc_sram_dword(rdev,
				     SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, PmFuseTable) +
				     offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl),
				     (u32 *)&pi->smc_powertune_table.TdcWaterfallCtl,
				     pi->sram_end);
	if (ret)
		return -EINVAL;
	else
		pi->smc_powertune_table.TdcWaterfallCtl = pt_defaults->tdc_waterfall_ctl;

	return 0;
}

static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
	u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
	int i, min, max;

	min = max = hi_vid[0];
	for (i = 0; i < 8; i++) {
		if (0 != hi_vid[i]) {
			if (min > hi_vid[i])
				min = hi_vid[i];
			if (max < hi_vid[i])
				max = hi_vid[i];
		}

		if (0 != lo_vid[i]) {
			if (min > lo_vid[i])
				min = lo_vid[i];
			if (max < lo_vid[i])
				max = lo_vid[i];
		}
	}

	if ((min == 0) || (max == 0))
		return -EINVAL;
	pi->smc_powertune_table.GnbLPMLMaxVid = (u8)max;
	pi->smc_powertune_table.GnbLPMLMinVid = (u8)min;

	return 0;
}

static int ci_populate_bapm_vddc_base_leakage_sidd(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u16 hi_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd;
	u16 lo_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd;
	struct radeon_cac_tdp_table *cac_tdp_table =
		rdev->pm.dpm.dyn_state.cac_tdp_table;

	hi_sidd = cac_tdp_table->high_cac_leakage / 100 * 256;
	lo_sidd = cac_tdp_table->low_cac_leakage / 100 * 256;

	pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd = cpu_to_be16(hi_sidd);
	pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd = cpu_to_be16(lo_sidd);

	return 0;
}

static int ci_populate_bapm_parameters_in_dpm_table(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	SMU7_Discrete_DpmTable  *dpm_table = &pi->smc_state_table;
	struct radeon_cac_tdp_table *cac_tdp_table =
		rdev->pm.dpm.dyn_state.cac_tdp_table;
	struct radeon_ppm_table *ppm = rdev->pm.dpm.dyn_state.ppm_table;
	int i, j, k;
	const u16 *def1;
	const u16 *def2;

	dpm_table->DefaultTdp = cac_tdp_table->tdp * 256;
	dpm_table->TargetTdp = cac_tdp_table->configurable_tdp * 256;

	dpm_table->DTETjOffset = (u8)pi->dte_tj_offset;
	dpm_table->GpuTjMax =
		(u8)(pi->thermal_temp_setting.temperature_high / 1000);
	dpm_table->GpuTjHyst = 8;

	dpm_table->DTEAmbientTempBase = pt_defaults->dte_ambient_temp_base;

	if (ppm) {
		dpm_table->PPM_PkgPwrLimit = cpu_to_be16((u16)ppm->dgpu_tdp * 256 / 1000);
		dpm_table->PPM_TemperatureLimit = cpu_to_be16((u16)ppm->tj_max * 256);
	} else {
		dpm_table->PPM_PkgPwrLimit = cpu_to_be16(0);
		dpm_table->PPM_TemperatureLimit = cpu_to_be16(0);
	}

	dpm_table->BAPM_TEMP_GRADIENT = cpu_to_be32(pt_defaults->bapm_temp_gradient);
	def1 = pt_defaults->bapmti_r;
	def2 = pt_defaults->bapmti_rc;

	for (i = 0; i < SMU7_DTE_ITERATIONS; i++) {
		for (j = 0; j < SMU7_DTE_SOURCES; j++) {
			for (k = 0; k < SMU7_DTE_SINKS; k++) {
				dpm_table->BAPMTI_R[i][j][k] = cpu_to_be16(*def1);
				dpm_table->BAPMTI_RC[i][j][k] = cpu_to_be16(*def2);
				def1++;
				def2++;
			}
		}
	}

	return 0;
}

static int ci_populate_pm_base(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 pm_fuse_table_offset;
	int ret;

	if (pi->caps_power_containment) {
		ret = ci_read_smc_sram_dword(rdev,
					     SMU7_FIRMWARE_HEADER_LOCATION +
					     offsetof(SMU7_Firmware_Header, PmFuseTable),
					     &pm_fuse_table_offset, pi->sram_end);
		if (ret)
			return ret;
		ret = ci_populate_bapm_vddc_vid_sidd(rdev);
		if (ret)
			return ret;
		ret = ci_populate_vddc_vid(rdev);
		if (ret)
			return ret;
		ret = ci_populate_svi_load_line(rdev);
		if (ret)
			return ret;
		ret = ci_populate_tdc_limit(rdev);
		if (ret)
			return ret;
		ret = ci_populate_dw8(rdev);
		if (ret)
			return ret;
		ret = ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(rdev);
		if (ret)
			return ret;
		ret = ci_populate_bapm_vddc_base_leakage_sidd(rdev);
		if (ret)
			return ret;
		ret = ci_copy_bytes_to_smc(rdev, pm_fuse_table_offset,
					   (u8 *)&pi->smc_powertune_table,
					   sizeof(SMU7_Discrete_PmFuses), pi->sram_end);
		if (ret)
			return ret;
	}

	return 0;
}

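/*
 * Toggle the DIDT_CTRL_EN bit in each DIDT block control register for
 * whichever ramping domains (SQ/DB/TD/TCP) are enabled.  Callers hold RLC
 * safe mode around this; see ci_enable_didt().
 */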
static void ci_do_enable_didt(struct radeon_device *rdev, const bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 data;

	if (pi->caps_sq_ramping) {
		data = RREG32_DIDT(DIDT_SQ_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_SQ_CTRL0, data);
	}

	if (pi->caps_db_ramping) {
		data = RREG32_DIDT(DIDT_DB_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_DB_CTRL0, data);
	}

	if (pi->caps_td_ramping) {
		data = RREG32_DIDT(DIDT_TD_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_TD_CTRL0, data);
	}

	if (pi->caps_tcp_ramping) {
		data = RREG32_DIDT(DIDT_TCP_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_TCP_CTRL0, data);
	}
}

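/*
 * Walk a { offset, mask, shift, value, type } table and apply each entry as
 * a masked read-modify-write through the matching register space (SMC or
 * DIDT indirect, or plain MMIO).  CACHE-type entries accumulate bits that
 * are OR'd into the next non-cache write.
 */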
static int ci_program_pt_config_registers(struct radeon_device *rdev,
					  const struct ci_pt_config_reg *cac_config_regs)
{
	const struct ci_pt_config_reg *config_regs = cac_config_regs;
	u32 data;
	u32 cache = 0;

	if (config_regs == NULL)
		return -EINVAL;

	while (config_regs->offset != 0xFFFFFFFF) {
		if (config_regs->type == CISLANDS_CONFIGREG_CACHE) {
			cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
		} else {
			switch (config_regs->type) {
			case CISLANDS_CONFIGREG_SMC_IND:
				data = RREG32_SMC(config_regs->offset);
				break;
			case CISLANDS_CONFIGREG_DIDT_IND:
				data = RREG32_DIDT(config_regs->offset);
				break;
			default:
				data = RREG32(config_regs->offset << 2);
				break;
			}

			data &= ~config_regs->mask;
			data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
			data |= cache;

			switch (config_regs->type) {
			case CISLANDS_CONFIGREG_SMC_IND:
				WREG32_SMC(config_regs->offset, data);
				break;
			case CISLANDS_CONFIGREG_DIDT_IND:
				WREG32_DIDT(config_regs->offset, data);
				break;
			default:
				WREG32(config_regs->offset << 2, data);
				break;
			}
			cache = 0;
		}
		config_regs++;
	}
	return 0;
}

static int ci_enable_didt(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	int ret;

	if (pi->caps_sq_ramping || pi->caps_db_ramping ||
	    pi->caps_td_ramping || pi->caps_tcp_ramping) {
		cik_enter_rlc_safe_mode(rdev);

		if (enable) {
			ret = ci_program_pt_config_registers(rdev, didt_config_ci);
			if (ret) {
				cik_exit_rlc_safe_mode(rdev);
				return ret;
			}
		}

		ci_do_enable_didt(rdev, enable);

		cik_exit_rlc_safe_mode(rdev);
	}

	return 0;
}

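/*
 * Enable/disable the SMC power containment features (BAPM/DTE, TDC limit,
 * package power limit) via SMC messages; on enable, the package power
 * limit is also seeded with the max power delivery limit from the
 * cac_tdp_table.
 */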
static int ci_enable_power_containment(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;
	int ret = 0;

	if (enable) {
		pi->power_containment_features = 0;
		if (pi->caps_power_containment) {
			if (pi->enable_bapm_feature) {
				smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableDTE);
				if (smc_result != PPSMC_Result_OK)
					ret = -EINVAL;
				else
					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_BAPM;
			}

			if (pi->enable_tdc_limit_feature) {
				smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_TDCLimitEnable);
				if (smc_result != PPSMC_Result_OK)
					ret = -EINVAL;
				else
					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_TDCLimit;
			}

			if (pi->enable_pkg_pwr_tracking_feature) {
				smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PkgPwrLimitEnable);
				if (smc_result != PPSMC_Result_OK) {
					ret = -EINVAL;
				} else {
					struct radeon_cac_tdp_table *cac_tdp_table =
						rdev->pm.dpm.dyn_state.cac_tdp_table;
					u32 default_pwr_limit =
						(u32)(cac_tdp_table->maximum_power_delivery_limit * 256);

					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_PkgPwrLimit;

					ci_set_power_limit(rdev, default_pwr_limit);
				}
			}
		}
	} else {
		if (pi->caps_power_containment && pi->power_containment_features) {
			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_TDCLimit)
				ci_send_msg_to_smc(rdev, PPSMC_MSG_TDCLimitDisable);

			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)
				ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableDTE);

			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit)
				ci_send_msg_to_smc(rdev, PPSMC_MSG_PkgPwrLimitDisable);
			pi->power_containment_features = 0;
		}
	}

	return ret;
}

static int ci_enable_smc_cac(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;
	int ret = 0;

	if (pi->caps_cac) {
		if (enable) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableCac);
			if (smc_result != PPSMC_Result_OK) {
				ret = -EINVAL;
				pi->cac_enabled = false;
			} else {
				pi->cac_enabled = true;
			}
		} else if (pi->cac_enabled) {
			ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableCac);
			pi->cac_enabled = false;
		}
	}

	return ret;
}

static int ci_power_control_set_level(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_cac_tdp_table *cac_tdp_table =
		rdev->pm.dpm.dyn_state.cac_tdp_table;
	s32 adjust_percent;
	s32 target_tdp;
	int ret = 0;
	bool adjust_polarity = false; /* ??? */

	if (pi->caps_power_containment &&
	    (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)) {
		adjust_percent = adjust_polarity ?
			rdev->pm.dpm.tdp_adjustment : (-1 * rdev->pm.dpm.tdp_adjustment);
		target_tdp = ((100 + adjust_percent) *
			      (s32)cac_tdp_table->configurable_tdp) / 100;
		target_tdp *= 256;

		ret = ci_set_overdrive_target_tdp(rdev, (u32)target_tdp);
	}

	return ret;
}

void ci_dpm_powergate_uvd(struct radeon_device *rdev, bool gate)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (pi->uvd_power_gated == gate)
		return;

	pi->uvd_power_gated = gate;

	ci_update_uvd_dpm(rdev, gate);
}

bool ci_dpm_vblank_too_short(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 vblank_time = r600_dpm_get_vblank_time(rdev);
	u32 switch_limit = pi->mem_gddr5 ? 450 : 300;

	if (vblank_time < switch_limit)
		return true;
	else
		return false;
}

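/*
 * Clamp the requested power state to current limits: drop clocks to the DC
 * maximums when on battery power, clamp to the voltage dependency table
 * maximums, and pin mclk at the high level (i.e. disable mclk switching)
 * when multiple displays are active or vblank is too short.
 */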
static void ci_apply_state_adjust_rules(struct radeon_device *rdev,
					struct radeon_ps *rps)
{
	struct ci_ps *ps = ci_get_ps(rps);
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_clock_and_voltage_limits *max_limits;
	bool disable_mclk_switching;
	u32 sclk, mclk;
	u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
	int i;

	if (rps->vce_active) {
		rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;
		rps->ecclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].ecclk;
	} else {
		rps->evclk = 0;
		rps->ecclk = 0;
	}

	if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
	    ci_dpm_vblank_too_short(rdev))
		disable_mclk_switching = true;
	else
		disable_mclk_switching = false;

	if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
		pi->battery_state = true;
	else
		pi->battery_state = false;

	if (rdev->pm.dpm.ac_power)
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	else
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

	if (rdev->pm.dpm.ac_power == false) {
		for (i = 0; i < ps->performance_level_count; i++) {
			if (ps->performance_levels[i].mclk > max_limits->mclk)
				ps->performance_levels[i].mclk = max_limits->mclk;
			if (ps->performance_levels[i].sclk > max_limits->sclk)
				ps->performance_levels[i].sclk = max_limits->sclk;
		}
	}

	/* limit clocks to max supported clocks based on voltage dependency tables */
	btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
							&max_sclk_vddc);
	btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
							&max_mclk_vddci);
	btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
							&max_mclk_vddc);

	for (i = 0; i < ps->performance_level_count; i++) {
		if (max_sclk_vddc) {
			if (ps->performance_levels[i].sclk > max_sclk_vddc)
				ps->performance_levels[i].sclk = max_sclk_vddc;
		}
		if (max_mclk_vddci) {
			if (ps->performance_levels[i].mclk > max_mclk_vddci)
				ps->performance_levels[i].mclk = max_mclk_vddci;
		}
		if (max_mclk_vddc) {
			if (ps->performance_levels[i].mclk > max_mclk_vddc)
				ps->performance_levels[i].mclk = max_mclk_vddc;
		}
	}

	/* XXX validate the min clocks required for display */

	if (disable_mclk_switching) {
		mclk = ps->performance_levels[ps->performance_level_count - 1].mclk;
		sclk = ps->performance_levels[0].sclk;
	} else {
		mclk = ps->performance_levels[0].mclk;
		sclk = ps->performance_levels[0].sclk;
	}

	if (rps->vce_active) {
		if (sclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk)
			sclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk;
		if (mclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].mclk)
			mclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].mclk;
	}

	ps->performance_levels[0].sclk = sclk;
	ps->performance_levels[0].mclk = mclk;

	if (ps->performance_levels[1].sclk < ps->performance_levels[0].sclk)
		ps->performance_levels[1].sclk = ps->performance_levels[0].sclk;

	if (disable_mclk_switching) {
		if (ps->performance_levels[0].mclk < ps->performance_levels[1].mclk)
			ps->performance_levels[0].mclk = ps->performance_levels[1].mclk;
	} else {
		if (ps->performance_levels[1].mclk < ps->performance_levels[0].mclk)
			ps->performance_levels[1].mclk = ps->performance_levels[0].mclk;
	}
}

static int ci_set_thermal_temperature_range(struct radeon_device *rdev,
					    int min_temp, int max_temp)
{
	int low_temp = 0 * 1000;
	int high_temp = 255 * 1000;
	u32 tmp;

	if (low_temp < min_temp)
		low_temp = min_temp;
	if (high_temp > max_temp)
		high_temp = max_temp;
	if (high_temp < low_temp) {
		DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
		return -EINVAL;
	}

	tmp = RREG32_SMC(CG_THERMAL_INT);
	tmp &= ~(CI_DIG_THERM_INTH_MASK | CI_DIG_THERM_INTL_MASK);
	tmp |= CI_DIG_THERM_INTH(high_temp / 1000) |
		CI_DIG_THERM_INTL(low_temp / 1000);
	WREG32_SMC(CG_THERMAL_INT, tmp);

#if 0
	/* XXX: need to figure out how to handle this properly */
	tmp = RREG32_SMC(CG_THERMAL_CTRL);
	tmp &= DIG_THERM_DPM_MASK;
	tmp |= DIG_THERM_DPM(high_temp / 1000);
	WREG32_SMC(CG_THERMAL_CTRL, tmp);
#endif

	rdev->pm.dpm.thermal.min_temp = low_temp;
	rdev->pm.dpm.thermal.max_temp = high_temp;

	return 0;
}

#if 0
static int ci_read_smc_soft_register(struct radeon_device *rdev,
				     u16 reg_offset, u32 *value)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	return ci_read_smc_sram_dword(rdev,
				      pi->soft_regs_start + reg_offset,
				      value, pi->sram_end);
}
#endif

static int ci_write_smc_soft_register(struct radeon_device *rdev,
				      u16 reg_offset, u32 value)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	return ci_write_smc_sram_dword(rdev,
				       pi->soft_regs_start + reg_offset,
				       value, pi->sram_end);
}

static void ci_init_fps_limits(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	SMU7_Discrete_DpmTable *table = &pi->smc_state_table;

	if (pi->caps_fps) {
		u16 tmp;

		tmp = 45;
		table->FpsHighT = cpu_to_be16(tmp);

		tmp = 30;
		table->FpsLowT = cpu_to_be16(tmp);
	}
}

static int ci_update_sclk_t(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	int ret = 0;
	u32 low_sclk_interrupt_t = 0;

	if (pi->caps_sclk_throttle_low_notification) {
		low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);

		ret = ci_copy_bytes_to_smc(rdev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT),
					   (u8 *)&low_sclk_interrupt_t,
					   sizeof(u32), pi->sram_end);
	}

	return ret;
}

static void ci_get_leakage_voltages(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u16 leakage_id, virtual_voltage_id;
	u16 vddc, vddci;
	int i;

	pi->vddc_leakage.count = 0;
	pi->vddci_leakage.count = 0;

	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
		for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
			virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
			if (radeon_atom_get_voltage_evv(rdev, virtual_voltage_id, &vddc) != 0)
				continue;
			if (vddc != 0 && vddc != virtual_voltage_id) {
				pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
				pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
				pi->vddc_leakage.count++;
			}
		}
	} else if (radeon_atom_get_leakage_id_from_vbios(rdev, &leakage_id) == 0) {
		for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
			virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
			if (radeon_atom_get_leakage_vddc_based_on_leakage_params(rdev, &vddc, &vddci,
										 virtual_voltage_id,
										 leakage_id) == 0) {
				if (vddc != 0 && vddc != virtual_voltage_id) {
					pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
					pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
					pi->vddc_leakage.count++;
				}
				if (vddci != 0 && vddci != virtual_voltage_id) {
					pi->vddci_leakage.actual_voltage[pi->vddci_leakage.count] = vddci;
					pi->vddci_leakage.leakage_id[pi->vddci_leakage.count] = virtual_voltage_id;
					pi->vddci_leakage.count++;
				}
			}
		}
	}
}

static void ci_set_dpm_event_sources(struct radeon_device *rdev, u32 sources)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	bool want_thermal_protection;
	enum radeon_dpm_event_src dpm_event_src;
	u32 tmp;

	switch (sources) {
	case 0:
	default:
		want_thermal_protection = false;
		break;
	case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL):
		want_thermal_protection = true;
		dpm_event_src = RADEON_DPM_EVENT_SRC_DIGITAL;
		break;
	case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
		want_thermal_protection = true;
		dpm_event_src = RADEON_DPM_EVENT_SRC_EXTERNAL;
		break;
	case ((1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
	      (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL)):
		want_thermal_protection = true;
		dpm_event_src = RADEON_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
		break;
	}

	if (want_thermal_protection) {
#if 0
		/* XXX: need to figure out how to handle this properly */
		tmp = RREG32_SMC(CG_THERMAL_CTRL);
		tmp &= DPM_EVENT_SRC_MASK;
		tmp |= DPM_EVENT_SRC(dpm_event_src);
		WREG32_SMC(CG_THERMAL_CTRL, tmp);
#endif

		tmp = RREG32_SMC(GENERAL_PWRMGT);
		if (pi->thermal_protection)
			tmp &= ~THERMAL_PROTECTION_DIS;
		else
			tmp |= THERMAL_PROTECTION_DIS;
		WREG32_SMC(GENERAL_PWRMGT, tmp);
	} else {
		tmp = RREG32_SMC(GENERAL_PWRMGT);
		tmp |= THERMAL_PROTECTION_DIS;
		WREG32_SMC(GENERAL_PWRMGT, tmp);
	}
}

static void ci_enable_auto_throttle_source(struct radeon_device *rdev,
					   enum radeon_dpm_auto_throttle_src source,
					   bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (enable) {
		if (!(pi->active_auto_throttle_sources & (1 << source))) {
			pi->active_auto_throttle_sources |= 1 << source;
			ci_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
		}
	} else {
		if (pi->active_auto_throttle_sources & (1 << source)) {
			pi->active_auto_throttle_sources &= ~(1 << source);
			ci_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
		}
	}
}

static void ci_enable_vr_hot_gpio_interrupt(struct radeon_device *rdev)
{
	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT)
		ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableVRHotGPIOInterrupt);
}

static int ci_unfreeze_sclk_mclk_dpm(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;

	if (!pi->need_update_smu7_dpm_table)
		return 0;

	if ((!pi->sclk_dpm_key_disabled) &&
	    (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	if ((!pi->mclk_dpm_key_disabled) &&
	    (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	pi->need_update_smu7_dpm_table = 0;
	return 0;
}

static int ci_enable_sclk_mclk_dpm(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;

	if (enable) {
		if (!pi->sclk_dpm_key_disabled) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DPM_Enable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}

		if (!pi->mclk_dpm_key_disabled) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_Enable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;

			WREG32_P(MC_SEQ_CNTL_3, CAC_EN, ~CAC_EN);

			WREG32_SMC(LCAC_MC0_CNTL, 0x05);
			WREG32_SMC(LCAC_MC1_CNTL, 0x05);
			WREG32_SMC(LCAC_CPL_CNTL, 0x100005);

			udelay(10);

			WREG32_SMC(LCAC_MC0_CNTL, 0x400005);
			WREG32_SMC(LCAC_MC1_CNTL, 0x400005);
			WREG32_SMC(LCAC_CPL_CNTL, 0x500005);
		}
	} else {
		if (!pi->sclk_dpm_key_disabled) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DPM_Disable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}

		if (!pi->mclk_dpm_key_disabled) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_Disable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}
	}

	return 0;
}

static int ci_start_dpm(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;
	int ret;
	u32 tmp;

	tmp = RREG32_SMC(GENERAL_PWRMGT);
	tmp |= GLOBAL_PWRMGT_EN;
	WREG32_SMC(GENERAL_PWRMGT, tmp);

	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
	tmp |= DYNAMIC_PM_EN;
	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);

	ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, VoltageChangeTimeout), 0x1000);

	WREG32_P(BIF_LNCNT_RESET, 0, ~RESET_LNCNT_EN);

	smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Voltage_Cntl_Enable);
	if (smc_result != PPSMC_Result_OK)
		return -EINVAL;

	ret = ci_enable_sclk_mclk_dpm(rdev, true);
	if (ret)
		return ret;

	if (!pi->pcie_dpm_key_disabled) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_Enable);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}

static int ci_freeze_sclk_mclk_dpm(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;

	if (!pi->need_update_smu7_dpm_table)
		return 0;

	if ((!pi->sclk_dpm_key_disabled) &&
	    (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_SCLKDPM_FreezeLevel);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	if ((!pi->mclk_dpm_key_disabled) &&
	    (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_FreezeLevel);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}

static int ci_stop_dpm(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;
	int ret;
	u32 tmp;

	tmp = RREG32_SMC(GENERAL_PWRMGT);
	tmp &= ~GLOBAL_PWRMGT_EN;
	WREG32_SMC(GENERAL_PWRMGT, tmp);

	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
	tmp &= ~DYNAMIC_PM_EN;
	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);

	if (!pi->pcie_dpm_key_disabled) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_Disable);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	ret = ci_enable_sclk_mclk_dpm(rdev, false);
	if (ret)
		return ret;

	smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Voltage_Cntl_Disable);
	if (smc_result != PPSMC_Result_OK)
		return -EINVAL;

	return 0;
}

static void ci_enable_sclk_control(struct radeon_device *rdev, bool enable)
{
	u32 tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);

	if (enable)
		tmp &= ~SCLK_PWRMGT_OFF;
	else
		tmp |= SCLK_PWRMGT_OFF;
	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
}

#if 0
static int ci_notify_hw_of_power_source(struct radeon_device *rdev,
					bool ac_power)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_cac_tdp_table *cac_tdp_table =
		rdev->pm.dpm.dyn_state.cac_tdp_table;
	u32 power_limit;

	if (ac_power)
		power_limit = (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
	else
		power_limit = (u32)(cac_tdp_table->battery_power_limit * 256);

	ci_set_power_limit(rdev, power_limit);

	if (pi->caps_automatic_dc_transition) {
		if (ac_power)
			ci_send_msg_to_smc(rdev, PPSMC_MSG_RunningOnAC);
		else
			ci_send_msg_to_smc(rdev, PPSMC_MSG_Remove_DC_Clamp);
	}

	return 0;
}
#endif

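/*
 * SMC message helpers: the parameter variant writes the argument to
 * SMC_MSG_ARG_0 before issuing the message; the return variant reads the
 * result back from SMC_MSG_ARG_0 on success.
 */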
static PPSMC_Result ci_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
						      PPSMC_Msg msg, u32 parameter)
{
	WREG32(SMC_MSG_ARG_0, parameter);
	return ci_send_msg_to_smc(rdev, msg);
}

static PPSMC_Result ci_send_msg_to_smc_return_parameter(struct radeon_device *rdev,
							PPSMC_Msg msg, u32 *parameter)
{
	PPSMC_Result smc_result;

	smc_result = ci_send_msg_to_smc(rdev, msg);

	if ((smc_result == PPSMC_Result_OK) && parameter)
		*parameter = RREG32(SMC_MSG_ARG_0);

	return smc_result;
}

static int ci_dpm_force_state_sclk(struct radeon_device *rdev, u32 n)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (!pi->sclk_dpm_key_disabled) {
		PPSMC_Result smc_result =
			ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, n);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}

static int ci_dpm_force_state_mclk(struct radeon_device *rdev, u32 n)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (!pi->mclk_dpm_key_disabled) {
		PPSMC_Result smc_result =
			ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_MCLKDPM_ForceState, n);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}

static int ci_dpm_force_state_pcie(struct radeon_device *rdev, u32 n)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (!pi->pcie_dpm_key_disabled) {
		PPSMC_Result smc_result =
			ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_PCIeDPM_ForceLevel, n);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}

static int ci_set_power_limit(struct radeon_device *rdev, u32 n)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
		PPSMC_Result smc_result =
			ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_PkgPwrSetLimit, n);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}

static int ci_set_overdrive_target_tdp(struct radeon_device *rdev,
				       u32 target_tdp)
{
	PPSMC_Result smc_result =
		ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
	if (smc_result != PPSMC_Result_OK)
		return -EINVAL;
	return 0;
}

static int ci_set_boot_state(struct radeon_device *rdev)
{
	return ci_enable_sclk_mclk_dpm(rdev, false);
}

static u32 ci_get_average_sclk_freq(struct radeon_device *rdev)
{
	u32 sclk_freq;
	PPSMC_Result smc_result =
		ci_send_msg_to_smc_return_parameter(rdev,
						    PPSMC_MSG_API_GetSclkFrequency,
						    &sclk_freq);
	if (smc_result != PPSMC_Result_OK)
		sclk_freq = 0;

	return sclk_freq;
}

static u32 ci_get_average_mclk_freq(struct radeon_device *rdev)
{
	u32 mclk_freq;
	PPSMC_Result smc_result =
		ci_send_msg_to_smc_return_parameter(rdev,
						    PPSMC_MSG_API_GetMclkFrequency,
						    &mclk_freq);
	if (smc_result != PPSMC_Result_OK)
		mclk_freq = 0;

	return mclk_freq;
}

static void ci_dpm_start_smc(struct radeon_device *rdev)
{
	int i;

	ci_program_jump_on_start(rdev);
	ci_start_smc_clock(rdev);
	ci_start_smc(rdev);
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32_SMC(FIRMWARE_FLAGS) & INTERRUPTS_ENABLED)
			break;
	}
}

static void ci_dpm_stop_smc(struct radeon_device *rdev)
{
	ci_reset_smc(rdev);
	ci_stop_smc_clock(rdev);
}

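/*
 * Read the offsets of the DPM, soft register, MC register, fan, and MC arb
 * DRAM timing tables out of the SMU7 firmware header in SMC SRAM and cache
 * them in the power info.
 */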
static int ci_process_firmware_header(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 tmp;
	int ret;

	ret = ci_read_smc_sram_dword(rdev,
				     SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, DpmTable),
				     &tmp, pi->sram_end);
	if (ret)
		return ret;

	pi->dpm_table_start = tmp;

	ret = ci_read_smc_sram_dword(rdev,
				     SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, SoftRegisters),
				     &tmp, pi->sram_end);
	if (ret)
		return ret;

	pi->soft_regs_start = tmp;

	ret = ci_read_smc_sram_dword(rdev,
				     SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, mcRegisterTable),
				     &tmp, pi->sram_end);
	if (ret)
		return ret;

	pi->mc_reg_table_start = tmp;

	ret = ci_read_smc_sram_dword(rdev,
				     SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, FanTable),
				     &tmp, pi->sram_end);
	if (ret)
		return ret;

	pi->fan_table_start = tmp;

	ret = ci_read_smc_sram_dword(rdev,
				     SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, mcArbDramTimingTable),
				     &tmp, pi->sram_end);
	if (ret)
		return ret;

	pi->arb_table_start = tmp;

	return 0;
}

static void ci_read_clock_registers(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	pi->clock_registers.cg_spll_func_cntl =
		RREG32_SMC(CG_SPLL_FUNC_CNTL);
	pi->clock_registers.cg_spll_func_cntl_2 =
		RREG32_SMC(CG_SPLL_FUNC_CNTL_2);
	pi->clock_registers.cg_spll_func_cntl_3 =
		RREG32_SMC(CG_SPLL_FUNC_CNTL_3);
	pi->clock_registers.cg_spll_func_cntl_4 =
		RREG32_SMC(CG_SPLL_FUNC_CNTL_4);
	pi->clock_registers.cg_spll_spread_spectrum =
		RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM);
	pi->clock_registers.cg_spll_spread_spectrum_2 =
		RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM_2);
	pi->clock_registers.dll_cntl = RREG32(DLL_CNTL);
	pi->clock_registers.mclk_pwrmgt_cntl = RREG32(MCLK_PWRMGT_CNTL);
	pi->clock_registers.mpll_ad_func_cntl = RREG32(MPLL_AD_FUNC_CNTL);
	pi->clock_registers.mpll_dq_func_cntl = RREG32(MPLL_DQ_FUNC_CNTL);
	pi->clock_registers.mpll_func_cntl = RREG32(MPLL_FUNC_CNTL);
	pi->clock_registers.mpll_func_cntl_1 = RREG32(MPLL_FUNC_CNTL_1);
	pi->clock_registers.mpll_func_cntl_2 = RREG32(MPLL_FUNC_CNTL_2);
	pi->clock_registers.mpll_ss1 = RREG32(MPLL_SS1);
	pi->clock_registers.mpll_ss2 = RREG32(MPLL_SS2);
}

static void ci_init_sclk_t(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	pi->low_sclk_interrupt_t = 0;
}

static void ci_enable_thermal_protection(struct radeon_device *rdev,
					 bool enable)
{
	u32 tmp = RREG32_SMC(GENERAL_PWRMGT);

	if (enable)
		tmp &= ~THERMAL_PROTECTION_DIS;
	else
		tmp |= THERMAL_PROTECTION_DIS;
	WREG32_SMC(GENERAL_PWRMGT, tmp);
}

static void ci_enable_acpi_power_management(struct radeon_device *rdev)
{
	u32 tmp = RREG32_SMC(GENERAL_PWRMGT);

	tmp |= STATIC_PM_EN;

	WREG32_SMC(GENERAL_PWRMGT, tmp);
}

#if 0
static int ci_enter_ulp_state(struct radeon_device *rdev)
{
	WREG32(SMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower);

	udelay(25000);

	return 0;
}

static int ci_exit_ulp_state(struct radeon_device *rdev)
{
	int i;

	WREG32(SMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower);

	udelay(7000);

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(SMC_RESP_0) == 1)
			break;
		udelay(1000);
	}

	return 0;
}
#endif

static int ci_notify_smc_display_change(struct radeon_device *rdev,
					bool has_display)
{
	PPSMC_Msg msg = has_display ? PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay;

	return (ci_send_msg_to_smc(rdev, msg) == PPSMC_Result_OK) ? 0 : -EINVAL;
}

static int ci_enable_ds_master_switch(struct radeon_device *rdev,
				      bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (enable) {
		if (pi->caps_sclk_ds) {
			if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_ON) != PPSMC_Result_OK)
				return -EINVAL;
		} else {
			if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
				return -EINVAL;
		}
	} else {
		if (pi->caps_sclk_ds) {
			if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
				return -EINVAL;
		}
	}

	return 0;
}

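/*
 * Program the display gap and pre-VBI timing: the gap source depends on
 * whether any CRTCs are active, and the pre-VBI time (frame time minus
 * vblank, minus 200 us) is scaled by the reference clock into ticks for
 * CG_DISPLAY_GAP_CNTL2.
 */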
static void ci_program_display_gap(struct radeon_device *rdev)
{
	u32 tmp = RREG32_SMC(CG_DISPLAY_GAP_CNTL);
	u32 pre_vbi_time_in_us;
	u32 frame_time_in_us;
	u32 ref_clock = rdev->clock.spll.reference_freq;
	u32 refresh_rate = r600_dpm_get_vrefresh(rdev);
	u32 vblank_time = r600_dpm_get_vblank_time(rdev);

	tmp &= ~DISP_GAP_MASK;
	if (rdev->pm.dpm.new_active_crtc_count > 0)
		tmp |= DISP_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
	else
		tmp |= DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE);
	WREG32_SMC(CG_DISPLAY_GAP_CNTL, tmp);

	if (refresh_rate == 0)
		refresh_rate = 60;
	if (vblank_time == 0xffffffff)
		vblank_time = 500;
	frame_time_in_us = 1000000 / refresh_rate;
	pre_vbi_time_in_us =
		frame_time_in_us - 200 - vblank_time;
	tmp = pre_vbi_time_in_us * (ref_clock / 100);

	WREG32_SMC(CG_DISPLAY_GAP_CNTL2, tmp);
	ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, PreVBlankGap), 0x64);
	ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));

	ci_notify_smc_display_change(rdev, (rdev->pm.dpm.new_active_crtc_count == 1));
}

static void ci_enable_spread_spectrum(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 tmp;

	if (enable) {
		if (pi->caps_sclk_ss_support) {
			tmp = RREG32_SMC(GENERAL_PWRMGT);
			tmp |= DYN_SPREAD_SPECTRUM_EN;
			WREG32_SMC(GENERAL_PWRMGT, tmp);
		}
	} else {
		tmp = RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM);
		tmp &= ~SSEN;
		WREG32_SMC(CG_SPLL_SPREAD_SPECTRUM, tmp);

		tmp = RREG32_SMC(GENERAL_PWRMGT);
		tmp &= ~DYN_SPREAD_SPECTRUM_EN;
		WREG32_SMC(GENERAL_PWRMGT, tmp);
	}
}

static void ci_program_sstp(struct radeon_device *rdev)
{
	WREG32_SMC(CG_SSP, (SSTU(R600_SSTU_DFLT) | SST(R600_SST_DFLT)));
}

static void ci_enable_display_gap(struct radeon_device *rdev)
{
	u32 tmp = RREG32_SMC(CG_DISPLAY_GAP_CNTL);

	tmp &= ~(DISP_GAP_MASK | DISP_GAP_MCHG_MASK);
	tmp |= (DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE) |
		DISP_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK));

	WREG32_SMC(CG_DISPLAY_GAP_CNTL, tmp);
}

static void ci_program_vc(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
	tmp &= ~(RESET_SCLK_CNT | RESET_BUSY_CNT);
	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);

	WREG32_SMC(CG_FTV_0, CISLANDS_VRC_DFLT0);
	WREG32_SMC(CG_FTV_1, CISLANDS_VRC_DFLT1);
	WREG32_SMC(CG_FTV_2, CISLANDS_VRC_DFLT2);
	WREG32_SMC(CG_FTV_3, CISLANDS_VRC_DFLT3);
	WREG32_SMC(CG_FTV_4, CISLANDS_VRC_DFLT4);
	WREG32_SMC(CG_FTV_5, CISLANDS_VRC_DFLT5);
	WREG32_SMC(CG_FTV_6, CISLANDS_VRC_DFLT6);
	WREG32_SMC(CG_FTV_7, CISLANDS_VRC_DFLT7);
}

static void ci_clear_vc(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
	tmp |= (RESET_SCLK_CNT | RESET_BUSY_CNT);
	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);

	WREG32_SMC(CG_FTV_0, 0);
	WREG32_SMC(CG_FTV_1, 0);
	WREG32_SMC(CG_FTV_2, 0);
	WREG32_SMC(CG_FTV_3, 0);
	WREG32_SMC(CG_FTV_4, 0);
	WREG32_SMC(CG_FTV_5, 0);
	WREG32_SMC(CG_FTV_6, 0);
	WREG32_SMC(CG_FTV_7, 0);
}

static int ci_upload_firmware(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	int i, ret;

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32_SMC(RCU_UC_EVENTS) & BOOT_SEQ_DONE)
			break;
	}
	WREG32_SMC(SMC_SYSCON_MISC_CNTL, 1);

	ci_stop_smc_clock(rdev);
	ci_reset_smc(rdev);

	ret = ci_load_smc_ucode(rdev, pi->sram_end);

	return ret;
}

static int ci_get_svi2_voltage_table(struct radeon_device *rdev,
1689
				     struct radeon_clock_voltage_dependency_table *voltage_dependency_table,
1690
				     struct atom_voltage_table *voltage_table)
1691
{
1692
	u32 i;
1693
 
1694
	if (voltage_dependency_table == NULL)
1695
		return -EINVAL;
1696
 
1697
	voltage_table->mask_low = 0;
1698
	voltage_table->phase_delay = 0;
1699
 
1700
	voltage_table->count = voltage_dependency_table->count;
1701
	for (i = 0; i < voltage_table->count; i++) {
1702
		voltage_table->entries[i].value = voltage_dependency_table->entries[i].v;
1703
		voltage_table->entries[i].smio_low = 0;
1704
	}
1705
 
1706
	return 0;
1707
}
1708
 
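/*
 * Build the VDDC/VDDCI/MVDD voltage tables.  GPIO-controlled rails come
 * from the AtomBIOS lookup table, SVI2-controlled rails are derived
 * from the mclk dependency tables, and each result is trimmed so it
 * never exceeds the number of levels the SMU7 state table can hold.
 */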
static int ci_construct_voltage_tables(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	int ret;

	if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
		ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDC,
						    VOLTAGE_OBJ_GPIO_LUT,
						    &pi->vddc_voltage_table);
		if (ret)
			return ret;
	} else if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
		ret = ci_get_svi2_voltage_table(rdev,
						&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
						&pi->vddc_voltage_table);
		if (ret)
			return ret;
	}

	if (pi->vddc_voltage_table.count > SMU7_MAX_LEVELS_VDDC)
		si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_VDDC,
							 &pi->vddc_voltage_table);

	if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
		ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDCI,
						    VOLTAGE_OBJ_GPIO_LUT,
						    &pi->vddci_voltage_table);
		if (ret)
			return ret;
	} else if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
		ret = ci_get_svi2_voltage_table(rdev,
						&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
						&pi->vddci_voltage_table);
		if (ret)
			return ret;
	}

	if (pi->vddci_voltage_table.count > SMU7_MAX_LEVELS_VDDCI)
		si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_VDDCI,
							 &pi->vddci_voltage_table);

	if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
		ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_MVDDC,
						    VOLTAGE_OBJ_GPIO_LUT,
						    &pi->mvdd_voltage_table);
		if (ret)
			return ret;
	} else if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
		ret = ci_get_svi2_voltage_table(rdev,
						&rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
						&pi->mvdd_voltage_table);
		if (ret)
			return ret;
	}

	if (pi->mvdd_voltage_table.count > SMU7_MAX_LEVELS_MVDD)
		si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_MVDD,
							 &pi->mvdd_voltage_table);

	return 0;
}

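/*
 * Convert a single ATOM voltage entry to SMC format: scale the value by
 * VOLTAGE_SCALE, fall back to the plain voltage when no standard Hi/Lo
 * SIDD leakage values are available, and byte-swap for the big-endian
 * SMC.
 */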
static void ci_populate_smc_voltage_table(struct radeon_device *rdev,
					  struct atom_voltage_table_entry *voltage_table,
					  SMU7_Discrete_VoltageLevel *smc_voltage_table)
{
	int ret;

	ret = ci_get_std_voltage_value_sidd(rdev, voltage_table,
					    &smc_voltage_table->StdVoltageHiSidd,
					    &smc_voltage_table->StdVoltageLoSidd);

	if (ret) {
		smc_voltage_table->StdVoltageHiSidd = voltage_table->value * VOLTAGE_SCALE;
		smc_voltage_table->StdVoltageLoSidd = voltage_table->value * VOLTAGE_SCALE;
	}

	smc_voltage_table->Voltage = cpu_to_be16(voltage_table->value * VOLTAGE_SCALE);
	smc_voltage_table->StdVoltageHiSidd =
		cpu_to_be16(smc_voltage_table->StdVoltageHiSidd);
	smc_voltage_table->StdVoltageLoSidd =
		cpu_to_be16(smc_voltage_table->StdVoltageLoSidd);
}

static int ci_populate_smc_vddc_table(struct radeon_device *rdev,
				      SMU7_Discrete_DpmTable *table)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	unsigned int count;

	table->VddcLevelCount = pi->vddc_voltage_table.count;
	for (count = 0; count < table->VddcLevelCount; count++) {
		ci_populate_smc_voltage_table(rdev,
					      &pi->vddc_voltage_table.entries[count],
					      &table->VddcLevel[count]);

		if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
			table->VddcLevel[count].Smio |=
				pi->vddc_voltage_table.entries[count].smio_low;
		else
			table->VddcLevel[count].Smio = 0;
	}
	table->VddcLevelCount = cpu_to_be32(table->VddcLevelCount);

	return 0;
}

static int ci_populate_smc_vddci_table(struct radeon_device *rdev,
				       SMU7_Discrete_DpmTable *table)
{
	unsigned int count;
	struct ci_power_info *pi = ci_get_pi(rdev);

	table->VddciLevelCount = pi->vddci_voltage_table.count;
	for (count = 0; count < table->VddciLevelCount; count++) {
		ci_populate_smc_voltage_table(rdev,
					      &pi->vddci_voltage_table.entries[count],
					      &table->VddciLevel[count]);

		if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
			table->VddciLevel[count].Smio |=
				pi->vddci_voltage_table.entries[count].smio_low;
		else
			table->VddciLevel[count].Smio = 0;
	}
	table->VddciLevelCount = cpu_to_be32(table->VddciLevelCount);

	return 0;
}

static int ci_populate_smc_mvdd_table(struct radeon_device *rdev,
				      SMU7_Discrete_DpmTable *table)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	unsigned int count;

	table->MvddLevelCount = pi->mvdd_voltage_table.count;
	for (count = 0; count < table->MvddLevelCount; count++) {
		ci_populate_smc_voltage_table(rdev,
					      &pi->mvdd_voltage_table.entries[count],
					      &table->MvddLevel[count]);

		if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
			table->MvddLevel[count].Smio |=
				pi->mvdd_voltage_table.entries[count].smio_low;
		else
			table->MvddLevel[count].Smio = 0;
	}
	table->MvddLevelCount = cpu_to_be32(table->MvddLevelCount);

	return 0;
}

static int ci_populate_smc_voltage_tables(struct radeon_device *rdev,
					  SMU7_Discrete_DpmTable *table)
{
	int ret;

	ret = ci_populate_smc_vddc_table(rdev, table);
	if (ret)
		return ret;

	ret = ci_populate_smc_vddci_table(rdev, table);
	if (ret)
		return ret;

	ret = ci_populate_smc_mvdd_table(rdev, table);
	if (ret)
		return ret;

	return 0;
}

static int ci_populate_mvdd_value(struct radeon_device *rdev, u32 mclk,
				  SMU7_Discrete_VoltageLevel *voltage)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 i = 0;

	if (pi->mvdd_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
		for (i = 0; i < rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count; i++) {
			if (mclk <= rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries[i].clk) {
				voltage->Voltage = pi->mvdd_voltage_table.entries[i].value;
				break;
			}
		}

		if (i >= rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count)
			return -EINVAL;
	}

	return -EINVAL;
}

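/*
 * Look up the "standard" Hi/Lo SIDD voltages for a VDDC value.  The CAC
 * leakage table is indexed by the position of the matching (or first
 * higher) entry in the sclk dependency table, clamped to the last
 * leakage entry when the dependency table is the longer of the two.
 */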
static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev,
					 struct atom_voltage_table_entry *voltage_table,
					 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd)
{
	u16 v_index, idx;
	bool voltage_found = false;
	*std_voltage_hi_sidd = voltage_table->value * VOLTAGE_SCALE;
	*std_voltage_lo_sidd = voltage_table->value * VOLTAGE_SCALE;

	if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL)
		return -EINVAL;

	if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries) {
		for (v_index = 0; (u32)v_index < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
			if (voltage_table->value ==
			    rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
				voltage_found = true;
				if ((u32)v_index < rdev->pm.dpm.dyn_state.cac_leakage_table.count)
					idx = v_index;
				else
					idx = rdev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
				*std_voltage_lo_sidd =
					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
				*std_voltage_hi_sidd =
					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
				break;
			}
		}

		if (!voltage_found) {
			for (v_index = 0; (u32)v_index < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
				if (voltage_table->value <=
				    rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
					voltage_found = true;
					if ((u32)v_index < rdev->pm.dpm.dyn_state.cac_leakage_table.count)
						idx = v_index;
					else
						idx = rdev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
					*std_voltage_lo_sidd =
						rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
					*std_voltage_hi_sidd =
						rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
					break;
				}
			}
		}
	}

	return 0;
}

static void ci_populate_phase_value_based_on_sclk(struct radeon_device *rdev,
						  const struct radeon_phase_shedding_limits_table *limits,
						  u32 sclk,
						  u32 *phase_shedding)
{
	unsigned int i;

	*phase_shedding = 1;

	for (i = 0; i < limits->count; i++) {
		if (sclk < limits->entries[i].sclk) {
			*phase_shedding = i;
			break;
		}
	}
}

static void ci_populate_phase_value_based_on_mclk(struct radeon_device *rdev,
						  const struct radeon_phase_shedding_limits_table *limits,
						  u32 mclk,
						  u32 *phase_shedding)
{
	unsigned int i;

	*phase_shedding = 1;

	for (i = 0; i < limits->count; i++) {
		if (mclk < limits->entries[i].mclk) {
			*phase_shedding = i;
			break;
		}
	}
}

static int ci_init_arb_table_index(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 tmp;
	int ret;

	ret = ci_read_smc_sram_dword(rdev, pi->arb_table_start,
				     &tmp, pi->sram_end);
	if (ret)
		return ret;

	tmp &= 0x00FFFFFF;
	tmp |= MC_CG_ARB_FREQ_F1 << 24;

	return ci_write_smc_sram_dword(rdev, pi->arb_table_start,
				       tmp, pi->sram_end);
}

static int ci_get_dependency_volt_by_clk(struct radeon_device *rdev,
					 struct radeon_clock_voltage_dependency_table *allowed_clock_voltage_table,
					 u32 clock, u32 *voltage)
{
	u32 i = 0;

	if (allowed_clock_voltage_table->count == 0)
		return -EINVAL;

	for (i = 0; i < allowed_clock_voltage_table->count; i++) {
		if (allowed_clock_voltage_table->entries[i].clk >= clock) {
			*voltage = allowed_clock_voltage_table->entries[i].v;
			return 0;
		}
	}

	*voltage = allowed_clock_voltage_table->entries[i-1].v;

	return 0;
}

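/*
 * Pick the deep-sleep divider id: the largest power-of-two divider that
 * keeps sclk / (1 << id) at or above the floor.  A minimal worked
 * example, assuming a floor of 5000: sclk = 30000 gives
 * 30000 >> 3 = 3750 (too low) but 30000 >> 2 = 7500, so the id is 2.
 */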
static u8 ci_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
					     u32 sclk, u32 min_sclk_in_sr)
{
	u32 i;
	u32 tmp;
	u32 min = (min_sclk_in_sr > CISLAND_MINIMUM_ENGINE_CLOCK) ?
		min_sclk_in_sr : CISLAND_MINIMUM_ENGINE_CLOCK;

	if (sclk < min)
		return 0;

	for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
		tmp = sclk / (1 << i);
		if (tmp >= min || i == 0)
			break;
	}

	return (u8)i;
}

static int ci_initial_switch_from_arb_f0_to_f1(struct radeon_device *rdev)
{
	return ni_copy_and_switch_arb_sets(rdev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
}

static int ci_reset_to_default(struct radeon_device *rdev)
{
	return (ci_send_msg_to_smc(rdev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ?
		0 : -EINVAL;
}

static int ci_force_switch_to_arb_f0(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = (RREG32_SMC(SMC_SCRATCH9) & 0x0000ff00) >> 8;

	if (tmp == MC_CG_ARB_FREQ_F0)
		return 0;

	return ni_copy_and_switch_arb_sets(rdev, tmp, MC_CG_ARB_FREQ_F0);
}

static int ci_populate_memory_timing_parameters(struct radeon_device *rdev,
						u32 sclk,
						u32 mclk,
						SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs)
{
	u32 dram_timing;
	u32 dram_timing2;
	u32 burst_time;

	radeon_atom_set_engine_dram_timings(rdev, sclk, mclk);

	dram_timing  = RREG32(MC_ARB_DRAM_TIMING);
	dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
	burst_time = RREG32(MC_ARB_BURST_TIME) & STATE0_MASK;

	arb_regs->McArbDramTiming  = cpu_to_be32(dram_timing);
	arb_regs->McArbDramTiming2 = cpu_to_be32(dram_timing2);
	arb_regs->McArbBurstTime = (u8)burst_time;

	return 0;
}

static int ci_do_program_memory_timing_parameters(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	SMU7_Discrete_MCArbDramTimingTable arb_regs;
	u32 i, j;
	int ret = 0;

	memset(&arb_regs, 0, sizeof(SMU7_Discrete_MCArbDramTimingTable));

	for (i = 0; i < pi->dpm_table.sclk_table.count; i++) {
		for (j = 0; j < pi->dpm_table.mclk_table.count; j++) {
			ret = ci_populate_memory_timing_parameters(rdev,
								   pi->dpm_table.sclk_table.dpm_levels[i].value,
								   pi->dpm_table.mclk_table.dpm_levels[j].value,
								   &arb_regs.entries[i][j]);
			if (ret)
				break;
		}
	}

	if (ret == 0)
		ret = ci_copy_bytes_to_smc(rdev,
					   pi->arb_table_start,
					   (u8 *)&arb_regs,
					   sizeof(SMU7_Discrete_MCArbDramTimingTable),
					   pi->sram_end);

	return ret;
}

static int ci_program_memory_timing_parameters(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (pi->need_update_smu7_dpm_table == 0)
		return 0;

	return ci_do_program_memory_timing_parameters(rdev);
}

static void ci_populate_smc_initial_state(struct radeon_device *rdev,
					  struct radeon_ps *radeon_boot_state)
{
	struct ci_ps *boot_state = ci_get_ps(radeon_boot_state);
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 level = 0;

	for (level = 0; level < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; level++) {
		if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[level].clk >=
		    boot_state->performance_levels[0].sclk) {
			pi->smc_state_table.GraphicsBootLevel = level;
			break;
		}
	}

	for (level = 0; level < rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.count; level++) {
		if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries[level].clk >=
		    boot_state->performance_levels[0].mclk) {
			pi->smc_state_table.MemoryBootLevel = level;
			break;
		}
	}
}

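/*
 * Fold the per-level enabled flags into the bitmask the SMC expects:
 * bit N set means DPM level N is enabled.  For example, a four-level
 * table with levels 0-2 enabled and level 3 disabled yields 0b0111.
 */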
static u32 ci_get_dpm_level_enable_mask_value(struct ci_single_dpm_table *dpm_table)
{
	u32 i;
	u32 mask_value = 0;

	for (i = dpm_table->count; i > 0; i--) {
		mask_value = mask_value << 1;
		if (dpm_table->dpm_levels[i-1].enabled)
			mask_value |= 0x1;
		else
			mask_value &= 0xFFFFFFFE;
	}

	return mask_value;
}

static void ci_populate_smc_link_level(struct radeon_device *rdev,
				       SMU7_Discrete_DpmTable *table)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct ci_dpm_table *dpm_table = &pi->dpm_table;
	u32 i;

	for (i = 0; i < dpm_table->pcie_speed_table.count; i++) {
		table->LinkLevel[i].PcieGenSpeed =
			(u8)dpm_table->pcie_speed_table.dpm_levels[i].value;
		table->LinkLevel[i].PcieLaneCount =
			r600_encode_pci_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
		table->LinkLevel[i].EnabledForActivity = 1;
		table->LinkLevel[i].DownT = cpu_to_be32(5);
		table->LinkLevel[i].UpT = cpu_to_be32(30);
	}

	pi->smc_state_table.LinkLevelCount = (u8)dpm_table->pcie_speed_table.count;
	pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
		ci_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
}

static int ci_populate_smc_uvd_level(struct radeon_device *rdev,
				     SMU7_Discrete_DpmTable *table)
{
	u32 count;
	struct atom_clock_dividers dividers;
	int ret = -EINVAL;

	table->UvdLevelCount =
		rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count;

	for (count = 0; count < table->UvdLevelCount; count++) {
		table->UvdLevel[count].VclkFrequency =
			rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].vclk;
		table->UvdLevel[count].DclkFrequency =
			rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].dclk;
		table->UvdLevel[count].MinVddc =
			rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
		table->UvdLevel[count].MinVddcPhases = 1;

		ret = radeon_atom_get_clock_dividers(rdev,
						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
						     table->UvdLevel[count].VclkFrequency, false, &dividers);
		if (ret)
			return ret;

		table->UvdLevel[count].VclkDivider = (u8)dividers.post_divider;

		ret = radeon_atom_get_clock_dividers(rdev,
						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
						     table->UvdLevel[count].DclkFrequency, false, &dividers);
		if (ret)
			return ret;

		table->UvdLevel[count].DclkDivider = (u8)dividers.post_divider;

		table->UvdLevel[count].VclkFrequency = cpu_to_be32(table->UvdLevel[count].VclkFrequency);
		table->UvdLevel[count].DclkFrequency = cpu_to_be32(table->UvdLevel[count].DclkFrequency);
		table->UvdLevel[count].MinVddc = cpu_to_be16(table->UvdLevel[count].MinVddc);
	}

	return ret;
}

static int ci_populate_smc_vce_level(struct radeon_device *rdev,
				     SMU7_Discrete_DpmTable *table)
{
	u32 count;
	struct atom_clock_dividers dividers;
	int ret = -EINVAL;

	table->VceLevelCount =
		rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count;

	for (count = 0; count < table->VceLevelCount; count++) {
		table->VceLevel[count].Frequency =
			rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].evclk;
		table->VceLevel[count].MinVoltage =
			(u16)rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
		table->VceLevel[count].MinPhases = 1;

		ret = radeon_atom_get_clock_dividers(rdev,
						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
						     table->VceLevel[count].Frequency, false, &dividers);
		if (ret)
			return ret;

		table->VceLevel[count].Divider = (u8)dividers.post_divider;

		table->VceLevel[count].Frequency = cpu_to_be32(table->VceLevel[count].Frequency);
		table->VceLevel[count].MinVoltage = cpu_to_be16(table->VceLevel[count].MinVoltage);
	}

	return ret;
}

static int ci_populate_smc_acp_level(struct radeon_device *rdev,
				     SMU7_Discrete_DpmTable *table)
{
	u32 count;
	struct atom_clock_dividers dividers;
	int ret = -EINVAL;

	table->AcpLevelCount = (u8)
		(rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count);

	for (count = 0; count < table->AcpLevelCount; count++) {
		table->AcpLevel[count].Frequency =
			rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].clk;
		table->AcpLevel[count].MinVoltage =
			rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].v;
		table->AcpLevel[count].MinPhases = 1;

		ret = radeon_atom_get_clock_dividers(rdev,
						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
						     table->AcpLevel[count].Frequency, false, &dividers);
		if (ret)
			return ret;

		table->AcpLevel[count].Divider = (u8)dividers.post_divider;

		table->AcpLevel[count].Frequency = cpu_to_be32(table->AcpLevel[count].Frequency);
		table->AcpLevel[count].MinVoltage = cpu_to_be16(table->AcpLevel[count].MinVoltage);
	}

	return ret;
}

static int ci_populate_smc_samu_level(struct radeon_device *rdev,
				      SMU7_Discrete_DpmTable *table)
{
	u32 count;
	struct atom_clock_dividers dividers;
	int ret = -EINVAL;

	table->SamuLevelCount =
		rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count;

	for (count = 0; count < table->SamuLevelCount; count++) {
		table->SamuLevel[count].Frequency =
			rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].clk;
		table->SamuLevel[count].MinVoltage =
			rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
		table->SamuLevel[count].MinPhases = 1;

		ret = radeon_atom_get_clock_dividers(rdev,
						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
						     table->SamuLevel[count].Frequency, false, &dividers);
		if (ret)
			return ret;

		table->SamuLevel[count].Divider = (u8)dividers.post_divider;

		table->SamuLevel[count].Frequency = cpu_to_be32(table->SamuLevel[count].Frequency);
		table->SamuLevel[count].MinVoltage = cpu_to_be16(table->SamuLevel[count].MinVoltage);
	}

	return ret;
}

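/*
 * Translate a memory clock into the full set of MPLL register values.
 * The dividers come from AtomBIOS; the nominal frequency used for the
 * spread-spectrum lookup is 4x the memory clock on GDDR5 (2x
 * otherwise), and the optional memory SS is folded into MPLL_SS1/SS2
 * as CLKV/CLKS terms derived from the SS rate and percentage.
 */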
static int ci_calculate_mclk_params(struct radeon_device *rdev,
				    u32 memory_clock,
				    SMU7_Discrete_MemoryLevel *mclk,
				    bool strobe_mode,
				    bool dll_state_on)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 dll_cntl = pi->clock_registers.dll_cntl;
	u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
	u32 mpll_ad_func_cntl = pi->clock_registers.mpll_ad_func_cntl;
	u32 mpll_dq_func_cntl = pi->clock_registers.mpll_dq_func_cntl;
	u32 mpll_func_cntl = pi->clock_registers.mpll_func_cntl;
	u32 mpll_func_cntl_1 = pi->clock_registers.mpll_func_cntl_1;
	u32 mpll_func_cntl_2 = pi->clock_registers.mpll_func_cntl_2;
	u32 mpll_ss1 = pi->clock_registers.mpll_ss1;
	u32 mpll_ss2 = pi->clock_registers.mpll_ss2;
	struct atom_mpll_param mpll_param;
	int ret;

	ret = radeon_atom_get_memory_pll_dividers(rdev, memory_clock, strobe_mode, &mpll_param);
	if (ret)
		return ret;

	mpll_func_cntl &= ~BWCTRL_MASK;
	mpll_func_cntl |= BWCTRL(mpll_param.bwcntl);

	mpll_func_cntl_1 &= ~(CLKF_MASK | CLKFRAC_MASK | VCO_MODE_MASK);
	mpll_func_cntl_1 |= CLKF(mpll_param.clkf) |
		CLKFRAC(mpll_param.clkfrac) | VCO_MODE(mpll_param.vco_mode);

	mpll_ad_func_cntl &= ~YCLK_POST_DIV_MASK;
	mpll_ad_func_cntl |= YCLK_POST_DIV(mpll_param.post_div);

	if (pi->mem_gddr5) {
		mpll_dq_func_cntl &= ~(YCLK_SEL_MASK | YCLK_POST_DIV_MASK);
		mpll_dq_func_cntl |= YCLK_SEL(mpll_param.yclk_sel) |
			YCLK_POST_DIV(mpll_param.post_div);
	}

	if (pi->caps_mclk_ss_support) {
		struct radeon_atom_ss ss;
		u32 freq_nom;
		u32 tmp;
		u32 reference_clock = rdev->clock.mpll.reference_freq;

		if (pi->mem_gddr5)
			freq_nom = memory_clock * 4;
		else
			freq_nom = memory_clock * 2;

		tmp = (freq_nom / reference_clock);
		tmp = tmp * tmp;
		if (radeon_atombios_get_asic_ss_info(rdev, &ss,
						     ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
			u32 clks = reference_clock * 5 / ss.rate;
			u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);

			mpll_ss1 &= ~CLKV_MASK;
			mpll_ss1 |= CLKV(clkv);

			mpll_ss2 &= ~CLKS_MASK;
			mpll_ss2 |= CLKS(clks);
		}
	}

	mclk_pwrmgt_cntl &= ~DLL_SPEED_MASK;
	mclk_pwrmgt_cntl |= DLL_SPEED(mpll_param.dll_speed);

	if (dll_state_on)
		mclk_pwrmgt_cntl |= MRDCK0_PDNB | MRDCK1_PDNB;
	else
		mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);

	mclk->MclkFrequency = memory_clock;
	mclk->MpllFuncCntl = mpll_func_cntl;
	mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
	mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
	mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
	mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
	mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
	mclk->DllCntl = dll_cntl;
	mclk->MpllSs1 = mpll_ss1;
	mclk->MpllSs2 = mpll_ss2;

	return 0;
}

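/*
 * Build one SMC memory level.  Minimum VDDC/VDDCI/MVDD are looked up
 * from the mclk dependency tables; memory self-refresh stutter is only
 * allowed at or below the stutter threshold with UVD idle and at most
 * two active displays, strobe mode applies at or below its own
 * threshold, and on GDDR5 the EDC read/write enables kick in above
 * theirs.  All fields are byte-swapped for the big-endian SMC at the
 * end.
 */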
static int ci_populate_single_memory_level(struct radeon_device *rdev,
					   u32 memory_clock,
					   SMU7_Discrete_MemoryLevel *memory_level)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	int ret;
	bool dll_state_on;

	if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries) {
		ret = ci_get_dependency_volt_by_clk(rdev,
						    &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
						    memory_clock, &memory_level->MinVddc);
		if (ret)
			return ret;
	}

	if (rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries) {
		ret = ci_get_dependency_volt_by_clk(rdev,
						    &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
						    memory_clock, &memory_level->MinVddci);
		if (ret)
			return ret;
	}

	if (rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries) {
		ret = ci_get_dependency_volt_by_clk(rdev,
						    &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
						    memory_clock, &memory_level->MinMvdd);
		if (ret)
			return ret;
	}

	memory_level->MinVddcPhases = 1;

	if (pi->vddc_phase_shed_control)
		ci_populate_phase_value_based_on_mclk(rdev,
						      &rdev->pm.dpm.dyn_state.phase_shedding_limits_table,
						      memory_clock,
						      &memory_level->MinVddcPhases);

	memory_level->EnabledForThrottle = 1;
	memory_level->EnabledForActivity = 1;
	memory_level->UpH = 0;
	memory_level->DownH = 100;
	memory_level->VoltageDownH = 0;
	memory_level->ActivityLevel = (u16)pi->mclk_activity_target;

	memory_level->StutterEnable = false;
	memory_level->StrobeEnable = false;
	memory_level->EdcReadEnable = false;
	memory_level->EdcWriteEnable = false;
	memory_level->RttEnable = false;

	memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;

	if (pi->mclk_stutter_mode_threshold &&
	    (memory_clock <= pi->mclk_stutter_mode_threshold) &&
	    (pi->uvd_enabled == false) &&
	    (RREG32(DPG_PIPE_STUTTER_CONTROL) & STUTTER_ENABLE) &&
	    (rdev->pm.dpm.new_active_crtc_count <= 2))
		memory_level->StutterEnable = true;

	if (pi->mclk_strobe_mode_threshold &&
	    (memory_clock <= pi->mclk_strobe_mode_threshold))
		memory_level->StrobeEnable = 1;

	if (pi->mem_gddr5) {
		memory_level->StrobeRatio =
			si_get_mclk_frequency_ratio(memory_clock, memory_level->StrobeEnable);
		if (pi->mclk_edc_enable_threshold &&
		    (memory_clock > pi->mclk_edc_enable_threshold))
			memory_level->EdcReadEnable = true;

		if (pi->mclk_edc_wr_enable_threshold &&
		    (memory_clock > pi->mclk_edc_wr_enable_threshold))
			memory_level->EdcWriteEnable = true;

		if (memory_level->StrobeEnable) {
			if (si_get_mclk_frequency_ratio(memory_clock, true) >=
			    ((RREG32(MC_SEQ_MISC7) >> 16) & 0xf))
				dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
			else
				dll_state_on = ((RREG32(MC_SEQ_MISC6) >> 1) & 0x1) ? true : false;
		} else {
			dll_state_on = pi->dll_default_on;
		}
	} else {
		memory_level->StrobeRatio = si_get_ddr3_mclk_frequency_ratio(memory_clock);
		dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
	}

	ret = ci_calculate_mclk_params(rdev, memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
	if (ret)
		return ret;

	memory_level->MinVddc = cpu_to_be32(memory_level->MinVddc * VOLTAGE_SCALE);
	memory_level->MinVddcPhases = cpu_to_be32(memory_level->MinVddcPhases);
	memory_level->MinVddci = cpu_to_be32(memory_level->MinVddci * VOLTAGE_SCALE);
	memory_level->MinMvdd = cpu_to_be32(memory_level->MinMvdd * VOLTAGE_SCALE);

	memory_level->MclkFrequency = cpu_to_be32(memory_level->MclkFrequency);
	memory_level->ActivityLevel = cpu_to_be16(memory_level->ActivityLevel);
	memory_level->MpllFuncCntl = cpu_to_be32(memory_level->MpllFuncCntl);
	memory_level->MpllFuncCntl_1 = cpu_to_be32(memory_level->MpllFuncCntl_1);
	memory_level->MpllFuncCntl_2 = cpu_to_be32(memory_level->MpllFuncCntl_2);
	memory_level->MpllAdFuncCntl = cpu_to_be32(memory_level->MpllAdFuncCntl);
	memory_level->MpllDqFuncCntl = cpu_to_be32(memory_level->MpllDqFuncCntl);
	memory_level->MclkPwrmgtCntl = cpu_to_be32(memory_level->MclkPwrmgtCntl);
	memory_level->DllCntl = cpu_to_be32(memory_level->DllCntl);
	memory_level->MpllSs1 = cpu_to_be32(memory_level->MpllSs1);
	memory_level->MpllSs2 = cpu_to_be32(memory_level->MpllSs2);

	return 0;
}

static int ci_populate_smc_acpi_level(struct radeon_device *rdev,
				      SMU7_Discrete_DpmTable *table)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct atom_clock_dividers dividers;
	SMU7_Discrete_VoltageLevel voltage_level;
	u32 spll_func_cntl = pi->clock_registers.cg_spll_func_cntl;
	u32 spll_func_cntl_2 = pi->clock_registers.cg_spll_func_cntl_2;
	u32 dll_cntl = pi->clock_registers.dll_cntl;
	u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
	int ret;

	table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;

	if (pi->acpi_vddc)
		table->ACPILevel.MinVddc = cpu_to_be32(pi->acpi_vddc * VOLTAGE_SCALE);
	else
		table->ACPILevel.MinVddc = cpu_to_be32(pi->min_vddc_in_pp_table * VOLTAGE_SCALE);

	table->ACPILevel.MinVddcPhases = pi->vddc_phase_shed_control ? 0 : 1;

	table->ACPILevel.SclkFrequency = rdev->clock.spll.reference_freq;

	ret = radeon_atom_get_clock_dividers(rdev,
					     COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
					     table->ACPILevel.SclkFrequency, false, &dividers);
	if (ret)
		return ret;

	table->ACPILevel.SclkDid = (u8)dividers.post_divider;
	table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
	table->ACPILevel.DeepSleepDivId = 0;

	spll_func_cntl &= ~SPLL_PWRON;
	spll_func_cntl |= SPLL_RESET;

	spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
	spll_func_cntl_2 |= SCLK_MUX_SEL(4);

	table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
	table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
	table->ACPILevel.CgSpllFuncCntl3 = pi->clock_registers.cg_spll_func_cntl_3;
	table->ACPILevel.CgSpllFuncCntl4 = pi->clock_registers.cg_spll_func_cntl_4;
	table->ACPILevel.SpllSpreadSpectrum = pi->clock_registers.cg_spll_spread_spectrum;
	table->ACPILevel.SpllSpreadSpectrum2 = pi->clock_registers.cg_spll_spread_spectrum_2;
	table->ACPILevel.CcPwrDynRm = 0;
	table->ACPILevel.CcPwrDynRm1 = 0;

	table->ACPILevel.Flags = cpu_to_be32(table->ACPILevel.Flags);
	table->ACPILevel.MinVddcPhases = cpu_to_be32(table->ACPILevel.MinVddcPhases);
	table->ACPILevel.SclkFrequency = cpu_to_be32(table->ACPILevel.SclkFrequency);
	table->ACPILevel.CgSpllFuncCntl = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl);
	table->ACPILevel.CgSpllFuncCntl2 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl2);
	table->ACPILevel.CgSpllFuncCntl3 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl3);
	table->ACPILevel.CgSpllFuncCntl4 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl4);
	table->ACPILevel.SpllSpreadSpectrum = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum);
	table->ACPILevel.SpllSpreadSpectrum2 = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum2);
	table->ACPILevel.CcPwrDynRm = cpu_to_be32(table->ACPILevel.CcPwrDynRm);
	table->ACPILevel.CcPwrDynRm1 = cpu_to_be32(table->ACPILevel.CcPwrDynRm1);

	table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
	table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;

	if (pi->vddci_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
		if (pi->acpi_vddci)
			table->MemoryACPILevel.MinVddci =
				cpu_to_be32(pi->acpi_vddci * VOLTAGE_SCALE);
		else
			table->MemoryACPILevel.MinVddci =
				cpu_to_be32(pi->min_vddci_in_pp_table * VOLTAGE_SCALE);
	}

	if (ci_populate_mvdd_value(rdev, 0, &voltage_level))
		table->MemoryACPILevel.MinMvdd = 0;
	else
		table->MemoryACPILevel.MinMvdd =
			cpu_to_be32(voltage_level.Voltage * VOLTAGE_SCALE);

	mclk_pwrmgt_cntl |= MRDCK0_RESET | MRDCK1_RESET;
	mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);

	dll_cntl &= ~(MRDCK0_BYPASS | MRDCK1_BYPASS);

	table->MemoryACPILevel.DllCntl = cpu_to_be32(dll_cntl);
	table->MemoryACPILevel.MclkPwrmgtCntl = cpu_to_be32(mclk_pwrmgt_cntl);
	table->MemoryACPILevel.MpllAdFuncCntl =
		cpu_to_be32(pi->clock_registers.mpll_ad_func_cntl);
	table->MemoryACPILevel.MpllDqFuncCntl =
		cpu_to_be32(pi->clock_registers.mpll_dq_func_cntl);
	table->MemoryACPILevel.MpllFuncCntl =
		cpu_to_be32(pi->clock_registers.mpll_func_cntl);
	table->MemoryACPILevel.MpllFuncCntl_1 =
		cpu_to_be32(pi->clock_registers.mpll_func_cntl_1);
	table->MemoryACPILevel.MpllFuncCntl_2 =
		cpu_to_be32(pi->clock_registers.mpll_func_cntl_2);
	table->MemoryACPILevel.MpllSs1 = cpu_to_be32(pi->clock_registers.mpll_ss1);
	table->MemoryACPILevel.MpllSs2 = cpu_to_be32(pi->clock_registers.mpll_ss2);

	table->MemoryACPILevel.EnabledForThrottle = 0;
	table->MemoryACPILevel.EnabledForActivity = 0;
	table->MemoryACPILevel.UpH = 0;
	table->MemoryACPILevel.DownH = 100;
	table->MemoryACPILevel.VoltageDownH = 0;
	table->MemoryACPILevel.ActivityLevel =
		cpu_to_be16((u16)pi->mclk_activity_target);

	table->MemoryACPILevel.StutterEnable = false;
	table->MemoryACPILevel.StrobeEnable = false;
	table->MemoryACPILevel.EdcReadEnable = false;
	table->MemoryACPILevel.EdcWriteEnable = false;
	table->MemoryACPILevel.RttEnable = false;

	return 0;
}

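/*
 * ULV (ultra-low voltage) is toggled with a pair of SMC messages; both
 * paths report -EINVAL when the SMC does not acknowledge the request.
 */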
static int ci_enable_ulv(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct ci_ulv_parm *ulv = &pi->ulv;

	if (ulv->supported) {
		if (enable)
			return (ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ?
				0 : -EINVAL;
		else
			return (ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
				0 : -EINVAL;
	}

	return 0;
}

static int ci_populate_ulv_level(struct radeon_device *rdev,
				 SMU7_Discrete_Ulv *state)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u16 ulv_voltage = rdev->pm.dpm.backbias_response_time;

	state->CcPwrDynRm = 0;
	state->CcPwrDynRm1 = 0;

	if (ulv_voltage == 0) {
		pi->ulv.supported = false;
		return 0;
	}

	if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
		if (ulv_voltage > rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
			state->VddcOffset = 0;
		else
			state->VddcOffset =
				rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage;
	} else {
		if (ulv_voltage > rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
			state->VddcOffsetVid = 0;
		else
			state->VddcOffsetVid = (u8)
				((rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage) *
				 VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
	}
	state->VddcPhase = pi->vddc_phase_shed_control ? 0 : 1;

	state->CcPwrDynRm = cpu_to_be32(state->CcPwrDynRm);
	state->CcPwrDynRm1 = cpu_to_be32(state->CcPwrDynRm1);
	state->VddcOffset = cpu_to_be16(state->VddcOffset);

	return 0;
}

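/*
 * Translate an engine clock into SPLL register values.  The feedback
 * divider comes from AtomBIOS; when SCLK spread spectrum is supported,
 * CLK_S/CLK_V are computed from the SS rate and percentage against the
 * VCO frequency (engine_clock * post_div).
 */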
static int ci_calculate_sclk_params(struct radeon_device *rdev,
				    u32 engine_clock,
				    SMU7_Discrete_GraphicsLevel *sclk)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct atom_clock_dividers dividers;
	u32 spll_func_cntl_3 = pi->clock_registers.cg_spll_func_cntl_3;
	u32 spll_func_cntl_4 = pi->clock_registers.cg_spll_func_cntl_4;
	u32 cg_spll_spread_spectrum = pi->clock_registers.cg_spll_spread_spectrum;
	u32 cg_spll_spread_spectrum_2 = pi->clock_registers.cg_spll_spread_spectrum_2;
	u32 reference_clock = rdev->clock.spll.reference_freq;
	u32 reference_divider;
	u32 fbdiv;
	int ret;

	ret = radeon_atom_get_clock_dividers(rdev,
					     COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
					     engine_clock, false, &dividers);
	if (ret)
		return ret;

	reference_divider = 1 + dividers.ref_div;
	fbdiv = dividers.fb_div & 0x3FFFFFF;

	spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
	spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
	spll_func_cntl_3 |= SPLL_DITHEN;

	if (pi->caps_sclk_ss_support) {
		struct radeon_atom_ss ss;
		u32 vco_freq = engine_clock * dividers.post_div;

		if (radeon_atombios_get_asic_ss_info(rdev, &ss,
						     ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
			u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
			u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);

			cg_spll_spread_spectrum &= ~CLK_S_MASK;
			cg_spll_spread_spectrum |= CLK_S(clk_s);
			cg_spll_spread_spectrum |= SSEN;

			cg_spll_spread_spectrum_2 &= ~CLK_V_MASK;
			cg_spll_spread_spectrum_2 |= CLK_V(clk_v);
		}
	}

	sclk->SclkFrequency = engine_clock;
	sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
	sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
	sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
	sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
	sclk->SclkDid = (u8)dividers.post_divider;

	return 0;
}

static int ci_populate_single_graphic_level(struct radeon_device *rdev,
					    u32 engine_clock,
					    u16 sclk_activity_level_t,
					    SMU7_Discrete_GraphicsLevel *graphic_level)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	int ret;

	ret = ci_calculate_sclk_params(rdev, engine_clock, graphic_level);
	if (ret)
		return ret;

	ret = ci_get_dependency_volt_by_clk(rdev,
					    &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
					    engine_clock, &graphic_level->MinVddc);
	if (ret)
		return ret;

	graphic_level->SclkFrequency = engine_clock;

	graphic_level->Flags = 0;
	graphic_level->MinVddcPhases = 1;

	if (pi->vddc_phase_shed_control)
		ci_populate_phase_value_based_on_sclk(rdev,
						      &rdev->pm.dpm.dyn_state.phase_shedding_limits_table,
						      engine_clock,
						      &graphic_level->MinVddcPhases);

	graphic_level->ActivityLevel = sclk_activity_level_t;

	graphic_level->CcPwrDynRm = 0;
	graphic_level->CcPwrDynRm1 = 0;
	graphic_level->EnabledForActivity = 1;
	graphic_level->EnabledForThrottle = 1;
	graphic_level->UpH = 0;
	graphic_level->DownH = 0;
	graphic_level->VoltageDownH = 0;
	graphic_level->PowerThrottle = 0;

	if (pi->caps_sclk_ds)
		graphic_level->DeepSleepDivId = ci_get_sleep_divider_id_from_clock(rdev,
										   engine_clock,
										   CISLAND_MINIMUM_ENGINE_CLOCK);

	graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;

	graphic_level->Flags = cpu_to_be32(graphic_level->Flags);
	graphic_level->MinVddc = cpu_to_be32(graphic_level->MinVddc * VOLTAGE_SCALE);
	graphic_level->MinVddcPhases = cpu_to_be32(graphic_level->MinVddcPhases);
	graphic_level->SclkFrequency = cpu_to_be32(graphic_level->SclkFrequency);
	graphic_level->ActivityLevel = cpu_to_be16(graphic_level->ActivityLevel);
	graphic_level->CgSpllFuncCntl3 = cpu_to_be32(graphic_level->CgSpllFuncCntl3);
	graphic_level->CgSpllFuncCntl4 = cpu_to_be32(graphic_level->CgSpllFuncCntl4);
	graphic_level->SpllSpreadSpectrum = cpu_to_be32(graphic_level->SpllSpreadSpectrum);
	graphic_level->SpllSpreadSpectrum2 = cpu_to_be32(graphic_level->SpllSpreadSpectrum2);
	graphic_level->CcPwrDynRm = cpu_to_be32(graphic_level->CcPwrDynRm);
	graphic_level->CcPwrDynRm1 = cpu_to_be32(graphic_level->CcPwrDynRm1);

	return 0;
}

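/*
 * Fill every graphics level from the sclk DPM table and copy the whole
 * fixed-size level array into SMC SRAM; only the top level is marked
 * with the high display watermark.
 */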
static int ci_populate_all_graphic_levels(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct ci_dpm_table *dpm_table = &pi->dpm_table;
	u32 level_array_address = pi->dpm_table_start +
		offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
	u32 level_array_size = sizeof(SMU7_Discrete_GraphicsLevel) *
		SMU7_MAX_LEVELS_GRAPHICS;
	SMU7_Discrete_GraphicsLevel *levels = pi->smc_state_table.GraphicsLevel;
	u32 i, ret;

	memset(levels, 0, level_array_size);

	for (i = 0; i < dpm_table->sclk_table.count; i++) {
		ret = ci_populate_single_graphic_level(rdev,
						       dpm_table->sclk_table.dpm_levels[i].value,
						       (u16)pi->activity_target[i],
						       &pi->smc_state_table.GraphicsLevel[i]);
		if (ret)
			return ret;
		if (i == (dpm_table->sclk_table.count - 1))
			pi->smc_state_table.GraphicsLevel[i].DisplayWatermark =
				PPSMC_DISPLAY_WATERMARK_HIGH;
	}

	pi->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
	pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
		ci_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);

	ret = ci_copy_bytes_to_smc(rdev, level_array_address,
				   (u8 *)levels, level_array_size,
				   pi->sram_end);
	if (ret)
		return ret;

	return 0;
}

static int ci_populate_ulv_state(struct radeon_device *rdev,
				 SMU7_Discrete_Ulv *ulv_level)
{
	return ci_populate_ulv_level(rdev, ulv_level);
}

static int ci_populate_all_memory_levels(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct ci_dpm_table *dpm_table = &pi->dpm_table;
	u32 level_array_address = pi->dpm_table_start +
		offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
	u32 level_array_size = sizeof(SMU7_Discrete_MemoryLevel) *
		SMU7_MAX_LEVELS_MEMORY;
	SMU7_Discrete_MemoryLevel *levels = pi->smc_state_table.MemoryLevel;
	u32 i, ret;

	memset(levels, 0, level_array_size);

	for (i = 0; i < dpm_table->mclk_table.count; i++) {
		if (dpm_table->mclk_table.dpm_levels[i].value == 0)
			return -EINVAL;
		ret = ci_populate_single_memory_level(rdev,
						      dpm_table->mclk_table.dpm_levels[i].value,
						      &pi->smc_state_table.MemoryLevel[i]);
		if (ret)
			return ret;
	}

	pi->smc_state_table.MemoryLevel[0].ActivityLevel = cpu_to_be16(0x1F);

	pi->smc_state_table.MemoryDpmLevelCount = (u8)dpm_table->mclk_table.count;
	pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
		ci_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);

	pi->smc_state_table.MemoryLevel[dpm_table->mclk_table.count - 1].DisplayWatermark =
		PPSMC_DISPLAY_WATERMARK_HIGH;

	ret = ci_copy_bytes_to_smc(rdev, level_array_address,
				   (u8 *)levels, level_array_size,
				   pi->sram_end);
	if (ret)
		return ret;

	return 0;
}

static void ci_reset_single_dpm_table(struct radeon_device *rdev,
				      struct ci_single_dpm_table* dpm_table,
				      u32 count)
{
	u32 i;

	dpm_table->count = count;
	for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++)
		dpm_table->dpm_levels[i].enabled = false;
}

static void ci_setup_pcie_table_entry(struct ci_single_dpm_table* dpm_table,
				      u32 index, u32 pcie_gen, u32 pcie_lanes)
{
	dpm_table->dpm_levels[index].value = pcie_gen;
	dpm_table->dpm_levels[index].param1 = pcie_lanes;
	dpm_table->dpm_levels[index].enabled = true;
}

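/*
 * Build the six-entry PCIe DPM table.  If only one of the performance/
 * powersaving profiles is valid it is mirrored into the other; the
 * entries then alternate powersaving/performance while stepping from
 * minimum gen and lane width up to the maximums.
 */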
static int ci_setup_default_pcie_tables(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (!pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels)
		return -EINVAL;

	if (pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels) {
		pi->pcie_gen_powersaving = pi->pcie_gen_performance;
		pi->pcie_lane_powersaving = pi->pcie_lane_performance;
	} else if (!pi->use_pcie_performance_levels && pi->use_pcie_powersaving_levels) {
		pi->pcie_gen_performance = pi->pcie_gen_powersaving;
		pi->pcie_lane_performance = pi->pcie_lane_powersaving;
	}

	ci_reset_single_dpm_table(rdev,
				  &pi->dpm_table.pcie_speed_table,
				  SMU7_MAX_LEVELS_LINK);

	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
				  pi->pcie_gen_powersaving.min,
				  pi->pcie_lane_powersaving.min);
	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 1,
				  pi->pcie_gen_performance.min,
				  pi->pcie_lane_performance.min);
	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 2,
				  pi->pcie_gen_powersaving.min,
				  pi->pcie_lane_powersaving.max);
	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 3,
				  pi->pcie_gen_performance.min,
				  pi->pcie_lane_performance.max);
	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 4,
				  pi->pcie_gen_powersaving.max,
				  pi->pcie_lane_powersaving.max);
	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 5,
				  pi->pcie_gen_performance.max,
				  pi->pcie_lane_performance.max);

	pi->dpm_table.pcie_speed_table.count = 6;

	return 0;
}

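/*
 * Derive the default sclk/mclk/voltage DPM tables from the AtomBIOS
 * dependency tables.  Consecutive duplicate clocks are collapsed so
 * each DPM level is distinct; the voltage tables are copied verbatim,
 * with VDDC additionally annotated with its leakage value.
 */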
2971
static int ci_setup_default_dpm_tables(struct radeon_device *rdev)
2972
{
2973
	struct ci_power_info *pi = ci_get_pi(rdev);
2974
	struct radeon_clock_voltage_dependency_table *allowed_sclk_vddc_table =
2975
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
2976
	struct radeon_clock_voltage_dependency_table *allowed_mclk_table =
2977
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
2978
	struct radeon_cac_leakage_table *std_voltage_table =
2979
		&rdev->pm.dpm.dyn_state.cac_leakage_table;
2980
	u32 i;
2981
 
2982
	if (allowed_sclk_vddc_table == NULL)
2983
		return -EINVAL;
2984
	if (allowed_sclk_vddc_table->count < 1)
2985
		return -EINVAL;
2986
	if (allowed_mclk_table == NULL)
2987
		return -EINVAL;
2988
	if (allowed_mclk_table->count < 1)
2989
		return -EINVAL;
2990
 
2991
	memset(&pi->dpm_table, 0, sizeof(struct ci_dpm_table));
2992
 
2993
	ci_reset_single_dpm_table(rdev,
2994
				  &pi->dpm_table.sclk_table,
2995
				  SMU7_MAX_LEVELS_GRAPHICS);
2996
	ci_reset_single_dpm_table(rdev,
2997
				  &pi->dpm_table.mclk_table,
2998
				  SMU7_MAX_LEVELS_MEMORY);
2999
	ci_reset_single_dpm_table(rdev,
3000
				  &pi->dpm_table.vddc_table,
3001
				  SMU7_MAX_LEVELS_VDDC);
3002
	ci_reset_single_dpm_table(rdev,
3003
				  &pi->dpm_table.vddci_table,
3004
				  SMU7_MAX_LEVELS_VDDCI);
3005
	ci_reset_single_dpm_table(rdev,
3006
				  &pi->dpm_table.mvdd_table,
3007
				  SMU7_MAX_LEVELS_MVDD);
3008
 
3009
	pi->dpm_table.sclk_table.count = 0;
3010
	for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
3011
		if ((i == 0) ||
3012
		    (pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count-1].value !=
3013
		     allowed_sclk_vddc_table->entries[i].clk)) {
3014
			pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].value =
3015
				allowed_sclk_vddc_table->entries[i].clk;
3016
			pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled = true;
3017
			pi->dpm_table.sclk_table.count++;
3018
		}
3019
	}
3020
 
3021
	pi->dpm_table.mclk_table.count = 0;
3022
	for (i = 0; i < allowed_mclk_table->count; i++) {
3023
		if ((i==0) ||
3024
		    (pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count-1].value !=
3025
		     allowed_mclk_table->entries[i].clk)) {
3026
			pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].value =
3027
				allowed_mclk_table->entries[i].clk;
3028
			pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled = true;
3029
			pi->dpm_table.mclk_table.count++;
3030
		}
3031
	}
3032
 
3033
	for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
3034
		pi->dpm_table.vddc_table.dpm_levels[i].value =
3035
			allowed_sclk_vddc_table->entries[i].v;
3036
		pi->dpm_table.vddc_table.dpm_levels[i].param1 =
3037
			std_voltage_table->entries[i].leakage;
3038
		pi->dpm_table.vddc_table.dpm_levels[i].enabled = true;
3039
	}
3040
	pi->dpm_table.vddc_table.count = allowed_sclk_vddc_table->count;
3041
 
3042
	allowed_mclk_table = &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
3043
	if (allowed_mclk_table) {
3044
		for (i = 0; i < allowed_mclk_table->count; i++) {
3045
			pi->dpm_table.vddci_table.dpm_levels[i].value =
3046
				allowed_mclk_table->entries[i].v;
3047
			pi->dpm_table.vddci_table.dpm_levels[i].enabled = true;
3048
		}
3049
		pi->dpm_table.vddci_table.count = allowed_mclk_table->count;
3050
	}
3051
 
3052
	allowed_mclk_table = &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk;
3053
	if (allowed_mclk_table) {
3054
		for (i = 0; i < allowed_mclk_table->count; i++) {
3055
			pi->dpm_table.mvdd_table.dpm_levels[i].value =
3056
				allowed_mclk_table->entries[i].v;
3057
			pi->dpm_table.mvdd_table.dpm_levels[i].enabled = true;
3058
		}
3059
		pi->dpm_table.mvdd_table.count = allowed_mclk_table->count;
3060
	}
3061
 
3062
	ci_setup_default_pcie_tables(rdev);
3063
 
3064
	return 0;
3065
}
3066
 
3067
static int ci_find_boot_level(struct ci_single_dpm_table *table,
3068
			      u32 value, u32 *boot_level)
3069
{
3070
	u32 i;
3071
	int ret = -EINVAL;
3072
 
3073
	for(i = 0; i < table->count; i++) {
3074
		if (value == table->dpm_levels[i].value) {
3075
			*boot_level = i;
3076
			ret = 0;
3077
		}
3078
	}
3079
 
3080
	return ret;
3081
}
3082
 
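/*
 * Assemble the complete SMU7 discrete DPM table and push it to SMC
 * SRAM: default tables, per-block levels (graphics, memory, link, UVD,
 * VCE, ACP, SAMU, ACPI), boot levels, thermal limits and the global
 * flags.  Multi-byte fields are converted to big-endian just before
 * the copy, which deliberately stops short of the trailing PID
 * controllers.
 */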
3083
static int ci_init_smc_table(struct radeon_device *rdev)
3084
{
3085
	struct ci_power_info *pi = ci_get_pi(rdev);
3086
	struct ci_ulv_parm *ulv = &pi->ulv;
3087
	struct radeon_ps *radeon_boot_state = rdev->pm.dpm.boot_ps;
3088
	SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
3089
	int ret;
3090
 
3091
	ret = ci_setup_default_dpm_tables(rdev);
3092
	if (ret)
3093
		return ret;
3094
 
3095
	if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE)
3096
		ci_populate_smc_voltage_tables(rdev, table);
3097
 
3098
	ci_init_fps_limits(rdev);
3099
 
3100
	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
3101
		table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
3102
 
3103
	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
3104
		table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
3105
 
3106
	if (pi->mem_gddr5)
3107
		table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
3108
 
3109
	if (ulv->supported) {
3110
		ret = ci_populate_ulv_state(rdev, &pi->smc_state_table.Ulv);
3111
		if (ret)
3112
			return ret;
3113
		WREG32_SMC(CG_ULV_PARAMETER, ulv->cg_ulv_parameter);
3114
	}
3115
 
3116
	ret = ci_populate_all_graphic_levels(rdev);
3117
	if (ret)
3118
		return ret;
3119
 
3120
	ret = ci_populate_all_memory_levels(rdev);
3121
	if (ret)
3122
		return ret;
3123
 
3124
	ci_populate_smc_link_level(rdev, table);

	ret = ci_populate_smc_acpi_level(rdev, table);
	if (ret)
		return ret;

	ret = ci_populate_smc_vce_level(rdev, table);
	if (ret)
		return ret;

	ret = ci_populate_smc_acp_level(rdev, table);
	if (ret)
		return ret;

	ret = ci_populate_smc_samu_level(rdev, table);
	if (ret)
		return ret;

	ret = ci_do_program_memory_timing_parameters(rdev);
	if (ret)
		return ret;

	ret = ci_populate_smc_uvd_level(rdev, table);
	if (ret)
		return ret;

	table->UvdBootLevel  = 0;
	table->VceBootLevel  = 0;
	table->AcpBootLevel  = 0;
	table->SamuBootLevel  = 0;
	table->GraphicsBootLevel  = 0;
	table->MemoryBootLevel  = 0;

	ret = ci_find_boot_level(&pi->dpm_table.sclk_table,
				 pi->vbios_boot_state.sclk_bootup_value,
				 (u32 *)&pi->smc_state_table.GraphicsBootLevel);

	ret = ci_find_boot_level(&pi->dpm_table.mclk_table,
				 pi->vbios_boot_state.mclk_bootup_value,
				 (u32 *)&pi->smc_state_table.MemoryBootLevel);

	table->BootVddc = pi->vbios_boot_state.vddc_bootup_value;
	table->BootVddci = pi->vbios_boot_state.vddci_bootup_value;
	table->BootMVdd = pi->vbios_boot_state.mvdd_bootup_value;

	ci_populate_smc_initial_state(rdev, radeon_boot_state);

	ret = ci_populate_bapm_parameters_in_dpm_table(rdev);
	if (ret)
		return ret;

	table->UVDInterval = 1;
	table->VCEInterval = 1;
	table->ACPInterval = 1;
	table->SAMUInterval = 1;
	table->GraphicsVoltageChangeEnable = 1;
	table->GraphicsThermThrottleEnable = 1;
	table->GraphicsInterval = 1;
	table->VoltageInterval = 1;
	table->ThermalInterval = 1;
	table->TemperatureLimitHigh = (u16)((pi->thermal_temp_setting.temperature_high *
					     CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
	table->TemperatureLimitLow = (u16)((pi->thermal_temp_setting.temperature_low *
					    CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
	table->MemoryVoltageChangeEnable = 1;
	table->MemoryInterval = 1;
	table->VoltageResponseTime = 0;
	table->VddcVddciDelta = 4000;
	table->PhaseResponseTime = 0;
	table->MemoryThermThrottleEnable = 1;
	table->PCIeBootLinkLevel = 0;
	table->PCIeGenInterval = 1;
	if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2)
		table->SVI2Enable  = 1;
	else
		table->SVI2Enable  = 0;

	table->ThermGpio = 17;
	table->SclkStepSize = 0x4000;

	table->SystemFlags = cpu_to_be32(table->SystemFlags);
	table->SmioMaskVddcVid = cpu_to_be32(table->SmioMaskVddcVid);
	table->SmioMaskVddcPhase = cpu_to_be32(table->SmioMaskVddcPhase);
	table->SmioMaskVddciVid = cpu_to_be32(table->SmioMaskVddciVid);
	table->SmioMaskMvddVid = cpu_to_be32(table->SmioMaskMvddVid);
	table->SclkStepSize = cpu_to_be32(table->SclkStepSize);
	table->TemperatureLimitHigh = cpu_to_be16(table->TemperatureLimitHigh);
	table->TemperatureLimitLow = cpu_to_be16(table->TemperatureLimitLow);
	table->VddcVddciDelta = cpu_to_be16(table->VddcVddciDelta);
	table->VoltageResponseTime = cpu_to_be16(table->VoltageResponseTime);
	table->PhaseResponseTime = cpu_to_be16(table->PhaseResponseTime);
	table->BootVddc = cpu_to_be16(table->BootVddc * VOLTAGE_SCALE);
	table->BootVddci = cpu_to_be16(table->BootVddci * VOLTAGE_SCALE);
	table->BootMVdd = cpu_to_be16(table->BootMVdd * VOLTAGE_SCALE);

	ret = ci_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Discrete_DpmTable, SystemFlags),
				   (u8 *)&table->SystemFlags,
				   sizeof(SMU7_Discrete_DpmTable) - 3 * sizeof(SMU7_PIDController),
				   pi->sram_end);
	if (ret)
		return ret;

	return 0;
}

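/*
 * Walk one DPM level table and enable only the levels whose clock value
 * falls inside [low_limit, high_limit]; levels outside the requested
 * state's range are masked off before the enable masks are generated.
 */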
static void ci_trim_single_dpm_states(struct radeon_device *rdev,
				      struct ci_single_dpm_table *dpm_table,
				      u32 low_limit, u32 high_limit)
{
	u32 i;

	for (i = 0; i < dpm_table->count; i++) {
		if ((dpm_table->dpm_levels[i].value < low_limit) ||
		    (dpm_table->dpm_levels[i].value > high_limit))
			dpm_table->dpm_levels[i].enabled = false;
		else
			dpm_table->dpm_levels[i].enabled = true;
	}
}

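/*
 * Same idea for the PCIe table, except a level is keyed by both link
 * speed (value) and lane count (param1).  A second pass disables
 * duplicate speed/lane pairs so each remaining level is unique.
 */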
static void ci_trim_pcie_dpm_states(struct radeon_device *rdev,
				    u32 speed_low, u32 lanes_low,
				    u32 speed_high, u32 lanes_high)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;
	u32 i, j;

	for (i = 0; i < pcie_table->count; i++) {
		if ((pcie_table->dpm_levels[i].value < speed_low) ||
		    (pcie_table->dpm_levels[i].param1 < lanes_low) ||
		    (pcie_table->dpm_levels[i].value > speed_high) ||
		    (pcie_table->dpm_levels[i].param1 > lanes_high))
			pcie_table->dpm_levels[i].enabled = false;
		else
			pcie_table->dpm_levels[i].enabled = true;
	}

	for (i = 0; i < pcie_table->count; i++) {
		if (pcie_table->dpm_levels[i].enabled) {
			for (j = i + 1; j < pcie_table->count; j++) {
				if (pcie_table->dpm_levels[j].enabled) {
					if ((pcie_table->dpm_levels[i].value == pcie_table->dpm_levels[j].value) &&
					    (pcie_table->dpm_levels[i].param1 == pcie_table->dpm_levels[j].param1))
						pcie_table->dpm_levels[j].enabled = false;
				}
			}
		}
	}
}

static int ci_trim_dpm_states(struct radeon_device *rdev,
			      struct radeon_ps *radeon_state)
{
	struct ci_ps *state = ci_get_ps(radeon_state);
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 high_limit_count;

	if (state->performance_level_count < 1)
		return -EINVAL;

	if (state->performance_level_count == 1)
		high_limit_count = 0;
	else
		high_limit_count = 1;

	ci_trim_single_dpm_states(rdev,
				  &pi->dpm_table.sclk_table,
				  state->performance_levels[0].sclk,
				  state->performance_levels[high_limit_count].sclk);

	ci_trim_single_dpm_states(rdev,
				  &pi->dpm_table.mclk_table,
				  state->performance_levels[0].mclk,
				  state->performance_levels[high_limit_count].mclk);

	ci_trim_pcie_dpm_states(rdev,
				state->performance_levels[0].pcie_gen,
				state->performance_levels[0].pcie_lane,
				state->performance_levels[high_limit_count].pcie_gen,
				state->performance_levels[high_limit_count].pcie_lane);

	return 0;
}

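/*
 * Look up the VDDC needed for the current display clock, round it up to
 * the next entry in the sclk/vddc dependency table, and hand that
 * minimum to the SMC via PPSMC_MSG_VddC_Request (scaled by
 * VOLTAGE_SCALE).
 */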
static int ci_apply_disp_minimum_voltage_request(struct radeon_device *rdev)
{
	struct radeon_clock_voltage_dependency_table *disp_voltage_table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk;
	struct radeon_clock_voltage_dependency_table *vddc_table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	u32 requested_voltage = 0;
	u32 i;

	if (disp_voltage_table == NULL)
		return -EINVAL;
	if (!disp_voltage_table->count)
		return -EINVAL;

	for (i = 0; i < disp_voltage_table->count; i++) {
		if (rdev->clock.current_dispclk == disp_voltage_table->entries[i].clk)
			requested_voltage = disp_voltage_table->entries[i].v;
	}

	for (i = 0; i < vddc_table->count; i++) {
		if (requested_voltage <= vddc_table->entries[i].v) {
			requested_voltage = vddc_table->entries[i].v;
			return (ci_send_msg_to_smc_with_parameter(rdev,
								  PPSMC_MSG_VddC_Request,
								  requested_voltage * VOLTAGE_SCALE) == PPSMC_Result_OK) ?
				0 : -EINVAL;
		}
	}

	return -EINVAL;
}

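/*
 * Push the sclk/mclk/PCIe level enable masks computed by
 * ci_generate_dpm_level_enable_mask() down to the SMC, skipping any
 * domain whose DPM key is disabled, then apply the display minimum
 * voltage request.
 */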
static int ci_upload_dpm_level_enable_mask(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result result;

	if (!pi->sclk_dpm_key_disabled) {
		if (pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
			result = ci_send_msg_to_smc_with_parameter(rdev,
								   PPSMC_MSG_SCLKDPM_SetEnabledMask,
								   pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
			if (result != PPSMC_Result_OK)
				return -EINVAL;
		}
	}

	if (!pi->mclk_dpm_key_disabled) {
		if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
			result = ci_send_msg_to_smc_with_parameter(rdev,
								   PPSMC_MSG_MCLKDPM_SetEnabledMask,
								   pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
			if (result != PPSMC_Result_OK)
				return -EINVAL;
		}
	}

	if (!pi->pcie_dpm_key_disabled) {
		if (pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
			result = ci_send_msg_to_smc_with_parameter(rdev,
								   PPSMC_MSG_PCIeDPM_SetEnabledMask,
								   pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
			if (result != PPSMC_Result_OK)
				return -EINVAL;
		}
	}

	ci_apply_disp_minimum_voltage_request(rdev);

	return 0;
}

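/*
 * Compare the requested state's top sclk/mclk against the current DPM
 * tables and record in need_update_smu7_dpm_table which tables must be
 * repopulated: DPMTABLE_OD_UPDATE_* when the clock is not in the table
 * at all (overdrive), DPMTABLE_UPDATE_* when only a re-upload is needed
 * (e.g. the active crtc count changed).
 */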
static void ci_find_dpm_states_clocks_in_dpm_table(struct radeon_device *rdev,
						   struct radeon_ps *radeon_state)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct ci_ps *state = ci_get_ps(radeon_state);
	struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
	u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
	struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
	u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
	u32 i;

	pi->need_update_smu7_dpm_table = 0;

	for (i = 0; i < sclk_table->count; i++) {
		if (sclk == sclk_table->dpm_levels[i].value)
			break;
	}

	if (i >= sclk_table->count) {
		pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
	} else {
		/* XXX check display min clock requirements */
		if (0 != CISLAND_MINIMUM_ENGINE_CLOCK)
			pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
	}

	for (i = 0; i < mclk_table->count; i++) {
		if (mclk == mclk_table->dpm_levels[i].value)
			break;
	}

	if (i >= mclk_table->count)
		pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;

	if (rdev->pm.dpm.current_active_crtc_count !=
	    rdev->pm.dpm.new_active_crtc_count)
		pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
}

static int ci_populate_and_upload_sclk_mclk_dpm_levels(struct radeon_device *rdev,
						       struct radeon_ps *radeon_state)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct ci_ps *state = ci_get_ps(radeon_state);
	u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
	u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
	struct ci_dpm_table *dpm_table = &pi->dpm_table;
	int ret;

	if (!pi->need_update_smu7_dpm_table)
		return 0;

	if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK)
		dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value = sclk;

	if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)
		dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value = mclk;

	if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) {
		ret = ci_populate_all_graphic_levels(rdev);
		if (ret)
			return ret;
	}

	if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
		ret = ci_populate_all_memory_levels(rdev);
		if (ret)
			return ret;
	}

	return 0;
}

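/*
 * Build the UVD level enable mask from the highest dependency-table
 * entries whose voltage fits under the current AC/DC limit and send it
 * to the SMC.  While UVD is enabled, bit 0 of the mclk enable mask is
 * cleared, presumably to keep memory DPM off its lowest level during
 * decode; the bit is restored when UVD is disabled again.
 */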
static int ci_enable_uvd_dpm(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct radeon_clock_and_voltage_limits *max_limits;
	int i;

	if (rdev->pm.dpm.ac_power)
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	else
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

	if (enable) {
		pi->dpm_level_enable_mask.uvd_dpm_enable_mask = 0;

		for (i = rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
			if (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
				pi->dpm_level_enable_mask.uvd_dpm_enable_mask |= 1 << i;

				if (!pi->caps_uvd_dpm)
					break;
			}
		}

		ci_send_msg_to_smc_with_parameter(rdev,
						  PPSMC_MSG_UVDDPM_SetEnabledMask,
						  pi->dpm_level_enable_mask.uvd_dpm_enable_mask);

		if (pi->last_mclk_dpm_enable_mask & 0x1) {
			pi->uvd_enabled = true;
			pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
			ci_send_msg_to_smc_with_parameter(rdev,
							  PPSMC_MSG_MCLKDPM_SetEnabledMask,
							  pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
		}
	} else {
		if (pi->last_mclk_dpm_enable_mask & 0x1) {
			pi->uvd_enabled = false;
			pi->dpm_level_enable_mask.mclk_dpm_enable_mask |= 1;
			ci_send_msg_to_smc_with_parameter(rdev,
							  PPSMC_MSG_MCLKDPM_SetEnabledMask,
							  pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
		}
	}

	return (ci_send_msg_to_smc(rdev, enable ?
				   PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable) == PPSMC_Result_OK) ?
		0 : -EINVAL;
}

static int ci_enable_vce_dpm(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct radeon_clock_and_voltage_limits *max_limits;
	int i;

	if (rdev->pm.dpm.ac_power)
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	else
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

	if (enable) {
		pi->dpm_level_enable_mask.vce_dpm_enable_mask = 0;
		for (i = rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
			if (rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
				pi->dpm_level_enable_mask.vce_dpm_enable_mask |= 1 << i;

				if (!pi->caps_vce_dpm)
					break;
			}
		}

		ci_send_msg_to_smc_with_parameter(rdev,
						  PPSMC_MSG_VCEDPM_SetEnabledMask,
						  pi->dpm_level_enable_mask.vce_dpm_enable_mask);
	}

	return (ci_send_msg_to_smc(rdev, enable ?
				   PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable) == PPSMC_Result_OK) ?
		0 : -EINVAL;
}

#if 0
static int ci_enable_samu_dpm(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct radeon_clock_and_voltage_limits *max_limits;
	int i;

	if (rdev->pm.dpm.ac_power)
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	else
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

	if (enable) {
		pi->dpm_level_enable_mask.samu_dpm_enable_mask = 0;
		for (i = rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
			if (rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
				pi->dpm_level_enable_mask.samu_dpm_enable_mask |= 1 << i;

				if (!pi->caps_samu_dpm)
					break;
			}
		}

		ci_send_msg_to_smc_with_parameter(rdev,
						  PPSMC_MSG_SAMUDPM_SetEnabledMask,
						  pi->dpm_level_enable_mask.samu_dpm_enable_mask);
	}
	return (ci_send_msg_to_smc(rdev, enable ?
				   PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable) == PPSMC_Result_OK) ?
		0 : -EINVAL;
}

static int ci_enable_acp_dpm(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct radeon_clock_and_voltage_limits *max_limits;
	int i;

	if (rdev->pm.dpm.ac_power)
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	else
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

	if (enable) {
		pi->dpm_level_enable_mask.acp_dpm_enable_mask = 0;
		for (i = rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
			if (rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
				pi->dpm_level_enable_mask.acp_dpm_enable_mask |= 1 << i;

				if (!pi->caps_acp_dpm)
					break;
			}
		}

		ci_send_msg_to_smc_with_parameter(rdev,
						  PPSMC_MSG_ACPDPM_SetEnabledMask,
						  pi->dpm_level_enable_mask.acp_dpm_enable_mask);
	}

	return (ci_send_msg_to_smc(rdev, enable ?
				   PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable) == PPSMC_Result_OK) ?
		0 : -EINVAL;
}
#endif

static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 tmp;

	if (!gate) {
		if (pi->caps_uvd_dpm ||
		    (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count <= 0))
			pi->smc_state_table.UvdBootLevel = 0;
		else
			pi->smc_state_table.UvdBootLevel =
				rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1;

		tmp = RREG32_SMC(DPM_TABLE_475);
		tmp &= ~UvdBootLevel_MASK;
		tmp |= UvdBootLevel(pi->smc_state_table.UvdBootLevel);
		WREG32_SMC(DPM_TABLE_475, tmp);
	}

	return ci_enable_uvd_dpm(rdev, !gate);
}

static u8 ci_get_vce_boot_level(struct radeon_device *rdev)
{
	u8 i;
	u32 min_evclk = 30000; /* ??? */
	struct radeon_vce_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;

	for (i = 0; i < table->count; i++) {
		if (table->entries[i].evclk >= min_evclk)
			return i;
	}

	return table->count - 1;
}

static int ci_update_vce_dpm(struct radeon_device *rdev,
			     struct radeon_ps *radeon_new_state,
			     struct radeon_ps *radeon_current_state)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	int ret = 0;
	u32 tmp;

	if (radeon_current_state->evclk != radeon_new_state->evclk) {
		if (radeon_new_state->evclk) {
			/* turn the clocks on when encoding */
			cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, false);

			pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(rdev);
			tmp = RREG32_SMC(DPM_TABLE_475);
			tmp &= ~VceBootLevel_MASK;
			tmp |= VceBootLevel(pi->smc_state_table.VceBootLevel);
			WREG32_SMC(DPM_TABLE_475, tmp);

			ret = ci_enable_vce_dpm(rdev, true);
		} else {
			/* turn the clocks off when not encoding */
			cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, true);

			ret = ci_enable_vce_dpm(rdev, false);
		}
	}
	return ret;
}

#if 0
static int ci_update_samu_dpm(struct radeon_device *rdev, bool gate)
{
	return ci_enable_samu_dpm(rdev, gate);
}

static int ci_update_acp_dpm(struct radeon_device *rdev, bool gate)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 tmp;

	if (!gate) {
		pi->smc_state_table.AcpBootLevel = 0;

		tmp = RREG32_SMC(DPM_TABLE_475);
		tmp &= ~AcpBootLevel_MASK;
		tmp |= AcpBootLevel(pi->smc_state_table.AcpBootLevel);
		WREG32_SMC(DPM_TABLE_475, tmp);
	}

	return ci_enable_acp_dpm(rdev, !gate);
}
#endif

static int ci_generate_dpm_level_enable_mask(struct radeon_device *rdev,
					     struct radeon_ps *radeon_state)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	int ret;

	ret = ci_trim_dpm_states(rdev, radeon_state);
	if (ret)
		return ret;

	pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
		ci_get_dpm_level_enable_mask_value(&pi->dpm_table.sclk_table);
	pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
		ci_get_dpm_level_enable_mask_value(&pi->dpm_table.mclk_table);
	pi->last_mclk_dpm_enable_mask =
		pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
	if (pi->uvd_enabled) {
		if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask & 1)
			pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
	}
	pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
		ci_get_dpm_level_enable_mask_value(&pi->dpm_table.pcie_speed_table);

	return 0;
}

static u32 ci_get_lowest_enabled_level(struct radeon_device *rdev,
				       u32 level_mask)
{
	u32 level = 0;

	while ((level_mask & (1 << level)) == 0)
		level++;

	return level;
}


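/*
 * Force the SMC to the highest or lowest enabled sclk/mclk/PCIe level,
 * or release the forcing again for AUTO.  After each force request the
 * TARGET_AND_CURRENT_PROFILE_INDEX registers are polled (up to
 * rdev->usec_timeout iterations of udelay(1)) until the hardware
 * reports the expected index.
 */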
int ci_dpm_force_performance_level(struct radeon_device *rdev,
				   enum radeon_dpm_forced_level level)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;
	u32 tmp, levels, i;
	int ret;

	if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
		if ((!pi->sclk_dpm_key_disabled) &&
		    pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
			levels = 0;
			tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask;
			while (tmp >>= 1)
				levels++;
			if (levels) {
				ret = ci_dpm_force_state_sclk(rdev, levels);
				if (ret)
					return ret;
				for (i = 0; i < rdev->usec_timeout; i++) {
					tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
					       CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT;
					if (tmp == levels)
						break;
					udelay(1);
				}
			}
		}
		if ((!pi->mclk_dpm_key_disabled) &&
		    pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
			levels = 0;
			tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
			while (tmp >>= 1)
				levels++;
			if (levels) {
				ret = ci_dpm_force_state_mclk(rdev, levels);
				if (ret)
					return ret;
				for (i = 0; i < rdev->usec_timeout; i++) {
					tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
					       CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT;
					if (tmp == levels)
						break;
					udelay(1);
				}
			}
		}
		if ((!pi->pcie_dpm_key_disabled) &&
		    pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
			levels = 0;
			tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
			while (tmp >>= 1)
				levels++;
			if (levels) {
				ret = ci_dpm_force_state_pcie(rdev, levels);
				if (ret)
					return ret;
				for (i = 0; i < rdev->usec_timeout; i++) {
					tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) &
					       CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT;
					if (tmp == levels)
						break;
					udelay(1);
				}
			}
		}
	} else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
		if ((!pi->sclk_dpm_key_disabled) &&
		    pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
			levels = ci_get_lowest_enabled_level(rdev,
							     pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
			ret = ci_dpm_force_state_sclk(rdev, levels);
			if (ret)
				return ret;
			for (i = 0; i < rdev->usec_timeout; i++) {
				tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
				       CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT;
				if (tmp == levels)
					break;
				udelay(1);
			}
		}
		if ((!pi->mclk_dpm_key_disabled) &&
		    pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
			levels = ci_get_lowest_enabled_level(rdev,
							     pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
			ret = ci_dpm_force_state_mclk(rdev, levels);
			if (ret)
				return ret;
			for (i = 0; i < rdev->usec_timeout; i++) {
				tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
				       CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT;
				if (tmp == levels)
					break;
				udelay(1);
			}
		}
		if ((!pi->pcie_dpm_key_disabled) &&
		    pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
			levels = ci_get_lowest_enabled_level(rdev,
							     pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
			ret = ci_dpm_force_state_pcie(rdev, levels);
			if (ret)
				return ret;
			for (i = 0; i < rdev->usec_timeout; i++) {
				tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) &
				       CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT;
				if (tmp == levels)
					break;
				udelay(1);
			}
		}
	} else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) {
		if (!pi->sclk_dpm_key_disabled) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_NoForcedLevel);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}
		if (!pi->mclk_dpm_key_disabled) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_NoForcedLevel);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}
		if (!pi->pcie_dpm_key_disabled) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_UnForceLevel);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}
	}

	rdev->pm.dpm.forced_level = level;

	return 0;
}

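/*
 * Append derived entries to the MC register table: the EMRS/MRS/MRS1
 * command registers are synthesized from the MC_SEQ_MISC1 and
 * MC_SEQ_RESERVE_M data captured from the VBIOS table, with an extra
 * MC_PMG_AUTO_CMD entry for non-GDDR5 memory.
 */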
static int ci_set_mc_special_registers(struct radeon_device *rdev,
				       struct ci_mc_reg_table *table)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u8 i, j, k;
	u32 temp_reg;

	for (i = 0, j = table->last; i < table->last; i++) {
		if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
			return -EINVAL;
		switch(table->mc_reg_address[i].s1 << 2) {
		case MC_SEQ_MISC1:
			temp_reg = RREG32(MC_PMG_CMD_EMRS);
			table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS >> 2;
			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
			for (k = 0; k < table->num_entries; k++) {
				table->mc_reg_table_entry[k].mc_data[j] =
					((temp_reg & 0xffff0000)) | ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
			}
			j++;
			if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
				return -EINVAL;

			temp_reg = RREG32(MC_PMG_CMD_MRS);
			table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS >> 2;
			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP >> 2;
			for (k = 0; k < table->num_entries; k++) {
				table->mc_reg_table_entry[k].mc_data[j] =
					(temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
				if (!pi->mem_gddr5)
					table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
			}
			j++;
			if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
				return -EINVAL;

			if (!pi->mem_gddr5) {
				table->mc_reg_address[j].s1 = MC_PMG_AUTO_CMD >> 2;
				table->mc_reg_address[j].s0 = MC_PMG_AUTO_CMD >> 2;
				for (k = 0; k < table->num_entries; k++) {
					table->mc_reg_table_entry[k].mc_data[j] =
						(table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
				}
				j++;
				if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
					return -EINVAL;
			}
			break;
		case MC_SEQ_RESERVE_M:
			temp_reg = RREG32(MC_PMG_CMD_MRS1);
			table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1 >> 2;
			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
			for (k = 0; k < table->num_entries; k++) {
				table->mc_reg_table_entry[k].mc_data[j] =
					(temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
			}
			j++;
			if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
				return -EINVAL;
			break;
		default:
			break;
		}

	}

	table->last = j;

	return 0;
}

static bool ci_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
{
	bool result = true;

	switch(in_reg) {
	case MC_SEQ_RAS_TIMING >> 2:
		*out_reg = MC_SEQ_RAS_TIMING_LP >> 2;
		break;
	case MC_SEQ_DLL_STBY >> 2:
		*out_reg = MC_SEQ_DLL_STBY_LP >> 2;
		break;
	case MC_SEQ_G5PDX_CMD0 >> 2:
		*out_reg = MC_SEQ_G5PDX_CMD0_LP >> 2;
		break;
	case MC_SEQ_G5PDX_CMD1 >> 2:
		*out_reg = MC_SEQ_G5PDX_CMD1_LP >> 2;
		break;
	case MC_SEQ_G5PDX_CTRL >> 2:
		*out_reg = MC_SEQ_G5PDX_CTRL_LP >> 2;
		break;
	case MC_SEQ_CAS_TIMING >> 2:
		*out_reg = MC_SEQ_CAS_TIMING_LP >> 2;
		break;
	case MC_SEQ_MISC_TIMING >> 2:
		*out_reg = MC_SEQ_MISC_TIMING_LP >> 2;
		break;
	case MC_SEQ_MISC_TIMING2 >> 2:
		*out_reg = MC_SEQ_MISC_TIMING2_LP >> 2;
		break;
	case MC_SEQ_PMG_DVS_CMD >> 2:
		*out_reg = MC_SEQ_PMG_DVS_CMD_LP >> 2;
		break;
	case MC_SEQ_PMG_DVS_CTL >> 2:
		*out_reg = MC_SEQ_PMG_DVS_CTL_LP >> 2;
		break;
	case MC_SEQ_RD_CTL_D0 >> 2:
		*out_reg = MC_SEQ_RD_CTL_D0_LP >> 2;
		break;
	case MC_SEQ_RD_CTL_D1 >> 2:
		*out_reg = MC_SEQ_RD_CTL_D1_LP >> 2;
		break;
	case MC_SEQ_WR_CTL_D0 >> 2:
		*out_reg = MC_SEQ_WR_CTL_D0_LP >> 2;
		break;
	case MC_SEQ_WR_CTL_D1 >> 2:
		*out_reg = MC_SEQ_WR_CTL_D1_LP >> 2;
		break;
	case MC_PMG_CMD_EMRS >> 2:
		*out_reg = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
		break;
	case MC_PMG_CMD_MRS >> 2:
		*out_reg = MC_SEQ_PMG_CMD_MRS_LP >> 2;
		break;
	case MC_PMG_CMD_MRS1 >> 2:
		*out_reg = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
		break;
	case MC_SEQ_PMG_TIMING >> 2:
		*out_reg = MC_SEQ_PMG_TIMING_LP >> 2;
		break;
	case MC_PMG_CMD_MRS2 >> 2:
		*out_reg = MC_SEQ_PMG_CMD_MRS2_LP >> 2;
		break;
	case MC_SEQ_WR_CTL_2 >> 2:
		*out_reg = MC_SEQ_WR_CTL_2_LP >> 2;
		break;
	default:
		result = false;
		break;
	}

	return result;
}

static void ci_set_valid_flag(struct ci_mc_reg_table *table)
{
	u8 i, j;

	for (i = 0; i < table->last; i++) {
		for (j = 1; j < table->num_entries; j++) {
			if (table->mc_reg_table_entry[j-1].mc_data[i] !=
			    table->mc_reg_table_entry[j].mc_data[i]) {
				table->valid_flag |= 1 << i;
				break;
			}
		}
	}
}

static void ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table)
{
	u32 i;
	u16 address;

	for (i = 0; i < table->last; i++) {
		table->mc_reg_address[i].s0 =
			ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
			address : table->mc_reg_address[i].s1;
	}
}

static int ci_copy_vbios_mc_reg_table(const struct atom_mc_reg_table *table,
				      struct ci_mc_reg_table *ci_table)
{
	u8 i, j;

	if (table->last > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
		return -EINVAL;
	if (table->num_entries > MAX_AC_TIMING_ENTRIES)
		return -EINVAL;

	for (i = 0; i < table->last; i++)
		ci_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;

	ci_table->last = table->last;

	for (i = 0; i < table->num_entries; i++) {
		ci_table->mc_reg_table_entry[i].mclk_max =
			table->mc_reg_table_entry[i].mclk_max;
		for (j = 0; j < table->last; j++)
			ci_table->mc_reg_table_entry[i].mc_data[j] =
				table->mc_reg_table_entry[i].mc_data[j];
	}
	ci_table->num_entries = table->num_entries;

	return 0;
}

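/*
 * Build the driver's MC register table: mirror the active MC_SEQ_*
 * registers into their _LP shadows, read the VBIOS AC timing table for
 * this memory module, add the derived special registers, and flag the
 * registers whose value actually differs between entries.
 */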
static int ci_initialize_mc_reg_table(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct atom_mc_reg_table *table;
	struct ci_mc_reg_table *ci_table = &pi->mc_reg_table;
	u8 module_index = rv770_get_memory_module_index(rdev);
	int ret;

	table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	WREG32(MC_SEQ_RAS_TIMING_LP, RREG32(MC_SEQ_RAS_TIMING));
	WREG32(MC_SEQ_CAS_TIMING_LP, RREG32(MC_SEQ_CAS_TIMING));
	WREG32(MC_SEQ_DLL_STBY_LP, RREG32(MC_SEQ_DLL_STBY));
	WREG32(MC_SEQ_G5PDX_CMD0_LP, RREG32(MC_SEQ_G5PDX_CMD0));
	WREG32(MC_SEQ_G5PDX_CMD1_LP, RREG32(MC_SEQ_G5PDX_CMD1));
	WREG32(MC_SEQ_G5PDX_CTRL_LP, RREG32(MC_SEQ_G5PDX_CTRL));
	WREG32(MC_SEQ_PMG_DVS_CMD_LP, RREG32(MC_SEQ_PMG_DVS_CMD));
	WREG32(MC_SEQ_PMG_DVS_CTL_LP, RREG32(MC_SEQ_PMG_DVS_CTL));
	WREG32(MC_SEQ_MISC_TIMING_LP, RREG32(MC_SEQ_MISC_TIMING));
	WREG32(MC_SEQ_MISC_TIMING2_LP, RREG32(MC_SEQ_MISC_TIMING2));
	WREG32(MC_SEQ_PMG_CMD_EMRS_LP, RREG32(MC_PMG_CMD_EMRS));
	WREG32(MC_SEQ_PMG_CMD_MRS_LP, RREG32(MC_PMG_CMD_MRS));
	WREG32(MC_SEQ_PMG_CMD_MRS1_LP, RREG32(MC_PMG_CMD_MRS1));
	WREG32(MC_SEQ_WR_CTL_D0_LP, RREG32(MC_SEQ_WR_CTL_D0));
	WREG32(MC_SEQ_WR_CTL_D1_LP, RREG32(MC_SEQ_WR_CTL_D1));
	WREG32(MC_SEQ_RD_CTL_D0_LP, RREG32(MC_SEQ_RD_CTL_D0));
	WREG32(MC_SEQ_RD_CTL_D1_LP, RREG32(MC_SEQ_RD_CTL_D1));
	WREG32(MC_SEQ_PMG_TIMING_LP, RREG32(MC_SEQ_PMG_TIMING));
	WREG32(MC_SEQ_PMG_CMD_MRS2_LP, RREG32(MC_PMG_CMD_MRS2));
	WREG32(MC_SEQ_WR_CTL_2_LP, RREG32(MC_SEQ_WR_CTL_2));

	ret = radeon_atom_init_mc_reg_table(rdev, module_index, table);
	if (ret)
		goto init_mc_done;

	ret = ci_copy_vbios_mc_reg_table(table, ci_table);
	if (ret)
		goto init_mc_done;

	ci_set_s0_mc_reg_index(ci_table);

	ret = ci_set_mc_special_registers(rdev, ci_table);
	if (ret)
		goto init_mc_done;

	ci_set_valid_flag(ci_table);

init_mc_done:
	kfree(table);

	return ret;
}

static int ci_populate_mc_reg_addresses(struct radeon_device *rdev,
					SMU7_Discrete_MCRegisters *mc_reg_table)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 i, j;

	for (i = 0, j = 0; j < pi->mc_reg_table.last; j++) {
		if (pi->mc_reg_table.valid_flag & (1 << j)) {
			if (i >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
				return -EINVAL;
			mc_reg_table->address[i].s0 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s0);
			mc_reg_table->address[i].s1 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s1);
			i++;
		}
	}

	mc_reg_table->last = (u8)i;

	return 0;
}

static void ci_convert_mc_registers(const struct ci_mc_reg_entry *entry,
				    SMU7_Discrete_MCRegisterSet *data,
				    u32 num_entries, u32 valid_flag)
{
	u32 i, j;

	for (i = 0, j = 0; j < num_entries; j++) {
		if (valid_flag & (1 << j)) {
			data->value[i] = cpu_to_be32(entry->mc_data[j]);
			i++;
		}
	}
}

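/*
 * Pick the first MC register table entry whose mclk_max covers the
 * given memory clock (falling back to the last entry) and emit its
 * valid registers in SMC byte order.
 */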
static void ci_convert_mc_reg_table_entry_to_smc(struct radeon_device *rdev,
						 const u32 memory_clock,
						 SMU7_Discrete_MCRegisterSet *mc_reg_table_data)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 i = 0;

	for(i = 0; i < pi->mc_reg_table.num_entries; i++) {
		if (memory_clock <= pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
			break;
	}

	if ((i == pi->mc_reg_table.num_entries) && (i > 0))
		--i;

	ci_convert_mc_registers(&pi->mc_reg_table.mc_reg_table_entry[i],
				mc_reg_table_data, pi->mc_reg_table.last,
				pi->mc_reg_table.valid_flag);
}

static void ci_convert_mc_reg_table_to_smc(struct radeon_device *rdev,
					   SMU7_Discrete_MCRegisters *mc_reg_table)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 i;

	for (i = 0; i < pi->dpm_table.mclk_table.count; i++)
		ci_convert_mc_reg_table_entry_to_smc(rdev,
						     pi->dpm_table.mclk_table.dpm_levels[i].value,
						     &mc_reg_table->data[i]);
}

static int ci_populate_initial_mc_reg_table(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	int ret;

	memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));

	ret = ci_populate_mc_reg_addresses(rdev, &pi->smc_mc_reg_table);
	if (ret)
		return ret;
	ci_convert_mc_reg_table_to_smc(rdev, &pi->smc_mc_reg_table);

	return ci_copy_bytes_to_smc(rdev,
				    pi->mc_reg_table_start,
				    (u8 *)&pi->smc_mc_reg_table,
				    sizeof(SMU7_Discrete_MCRegisters),
				    pi->sram_end);
}

static int ci_update_and_upload_mc_reg_table(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (!(pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
		return 0;

	memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));

	ci_convert_mc_reg_table_to_smc(rdev, &pi->smc_mc_reg_table);

	return ci_copy_bytes_to_smc(rdev,
				    pi->mc_reg_table_start +
				    offsetof(SMU7_Discrete_MCRegisters, data[0]),
				    (u8 *)&pi->smc_mc_reg_table.data[0],
				    sizeof(SMU7_Discrete_MCRegisterSet) *
				    pi->dpm_table.mclk_table.count,
				    pi->sram_end);
}

static void ci_enable_voltage_control(struct radeon_device *rdev)
{
	u32 tmp = RREG32_SMC(GENERAL_PWRMGT);

	tmp |= VOLT_PWRMGT_EN;
	WREG32_SMC(GENERAL_PWRMGT, tmp);
}

static enum radeon_pcie_gen ci_get_maximum_link_speed(struct radeon_device *rdev,
						      struct radeon_ps *radeon_state)
{
	struct ci_ps *state = ci_get_ps(radeon_state);
	int i;
	u16 pcie_speed, max_speed = 0;

	for (i = 0; i < state->performance_level_count; i++) {
		pcie_speed = state->performance_levels[i].pcie_gen;
		if (max_speed < pcie_speed)
			max_speed = pcie_speed;
	}

	return max_speed;
}

static u16 ci_get_current_pcie_speed(struct radeon_device *rdev)
{
	u32 speed_cntl = 0;

	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL) & LC_CURRENT_DATA_RATE_MASK;
	speed_cntl >>= LC_CURRENT_DATA_RATE_SHIFT;

	return (u16)speed_cntl;
}

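/*
 * Decode LC_LINK_WIDTH_RD from PCIE_LC_LINK_WIDTH_CNTL into a lane
 * count; unknown encodings (and x0/x16) are reported as 16 lanes.
 */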
static int ci_get_current_pcie_lane_number(struct radeon_device *rdev)
{
	u32 link_width = 0;

	link_width = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL) & LC_LINK_WIDTH_RD_MASK;
	link_width >>= LC_LINK_WIDTH_RD_SHIFT;

	switch (link_width) {
	case RADEON_PCIE_LC_LINK_WIDTH_X1:
		return 1;
	case RADEON_PCIE_LC_LINK_WIDTH_X2:
		return 2;
	case RADEON_PCIE_LC_LINK_WIDTH_X4:
		return 4;
	case RADEON_PCIE_LC_LINK_WIDTH_X8:
		return 8;
	case RADEON_PCIE_LC_LINK_WIDTH_X12:
		/* not actually supported */
		return 12;
	case RADEON_PCIE_LC_LINK_WIDTH_X0:
	case RADEON_PCIE_LC_LINK_WIDTH_X16:
	default:
		return 16;
	}
}

static void ci_request_link_speed_change_before_state_change(struct radeon_device *rdev,
							     struct radeon_ps *radeon_new_state,
							     struct radeon_ps *radeon_current_state)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	enum radeon_pcie_gen target_link_speed =
		ci_get_maximum_link_speed(rdev, radeon_new_state);
	enum radeon_pcie_gen current_link_speed;

	if (pi->force_pcie_gen == RADEON_PCIE_GEN_INVALID)
		current_link_speed = ci_get_maximum_link_speed(rdev, radeon_current_state);
	else
		current_link_speed = pi->force_pcie_gen;

	pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;
	pi->pspp_notify_required = false;
	if (target_link_speed > current_link_speed) {
		switch (target_link_speed) {
#ifdef CONFIG_ACPI
		case RADEON_PCIE_GEN3:
			if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
				break;
			pi->force_pcie_gen = RADEON_PCIE_GEN2;
			if (current_link_speed == RADEON_PCIE_GEN2)
				break;
		case RADEON_PCIE_GEN2:
			if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
				break;
#endif
		default:
			pi->force_pcie_gen = ci_get_current_pcie_speed(rdev);
			break;
		}
	} else {
		if (target_link_speed < current_link_speed)
			pi->pspp_notify_required = true;
	}
}

static void ci_notify_link_speed_change_after_state_change(struct radeon_device *rdev,
							   struct radeon_ps *radeon_new_state,
							   struct radeon_ps *radeon_current_state)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	enum radeon_pcie_gen target_link_speed =
		ci_get_maximum_link_speed(rdev, radeon_new_state);
	u8 request;

	if (pi->pspp_notify_required) {
		if (target_link_speed == RADEON_PCIE_GEN3)
			request = PCIE_PERF_REQ_PECI_GEN3;
		else if (target_link_speed == RADEON_PCIE_GEN2)
			request = PCIE_PERF_REQ_PECI_GEN2;
		else
			request = PCIE_PERF_REQ_PECI_GEN1;

		if ((request == PCIE_PERF_REQ_PECI_GEN1) &&
		    (ci_get_current_pcie_speed(rdev) > 0))
			return;

#ifdef CONFIG_ACPI
		radeon_acpi_pcie_performance_request(rdev, request, false);
#endif
	}
}

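/*
 * Cache the min/max VDDC and VDDCI from the powerplay dependency tables
 * and derive the maximum AC clock/voltage limits from their last
 * (highest) entries.
 */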
static int ci_set_private_data_variables_based_on_pptable(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_clock_voltage_dependency_table *allowed_sclk_vddc_table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	struct radeon_clock_voltage_dependency_table *allowed_mclk_vddc_table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
	struct radeon_clock_voltage_dependency_table *allowed_mclk_vddci_table =
		&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk;

	if (allowed_sclk_vddc_table == NULL)
		return -EINVAL;
	if (allowed_sclk_vddc_table->count < 1)
		return -EINVAL;
	if (allowed_mclk_vddc_table == NULL)
		return -EINVAL;
	if (allowed_mclk_vddc_table->count < 1)
		return -EINVAL;
	if (allowed_mclk_vddci_table == NULL)
		return -EINVAL;
	if (allowed_mclk_vddci_table->count < 1)
		return -EINVAL;

	pi->min_vddc_in_pp_table = allowed_sclk_vddc_table->entries[0].v;
	pi->max_vddc_in_pp_table =
		allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;

	pi->min_vddci_in_pp_table = allowed_mclk_vddci_table->entries[0].v;
	pi->max_vddci_in_pp_table =
		allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;

	rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk =
		allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
	rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk =
		allowed_mclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
	rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc =
		allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
	rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci =
		allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;

	return 0;
}

static void ci_patch_with_vddc_leakage(struct radeon_device *rdev, u16 *vddc)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct ci_leakage_voltage *leakage_table = &pi->vddc_leakage;
	u32 leakage_index;

	for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
		if (leakage_table->leakage_id[leakage_index] == *vddc) {
			*vddc = leakage_table->actual_voltage[leakage_index];
			break;
		}
	}
}

static void ci_patch_with_vddci_leakage(struct radeon_device *rdev, u16 *vddci)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct ci_leakage_voltage *leakage_table = &pi->vddci_leakage;
	u32 leakage_index;

	for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
		if (leakage_table->leakage_id[leakage_index] == *vddci) {
			*vddci = leakage_table->actual_voltage[leakage_index];
			break;
		}
	}
}

static void ci_patch_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
								      struct radeon_clock_voltage_dependency_table *table)
{
	u32 i;

	if (table) {
		for (i = 0; i < table->count; i++)
			ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
	}
}

static void ci_patch_clock_voltage_dependency_table_with_vddci_leakage(struct radeon_device *rdev,
								       struct radeon_clock_voltage_dependency_table *table)
{
	u32 i;

	if (table) {
		for (i = 0; i < table->count; i++)
			ci_patch_with_vddci_leakage(rdev, &table->entries[i].v);
	}
}

static void ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
									  struct radeon_vce_clock_voltage_dependency_table *table)
{
	u32 i;

	if (table) {
		for (i = 0; i < table->count; i++)
			ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
	}
}

static void ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
									  struct radeon_uvd_clock_voltage_dependency_table *table)
{
	u32 i;

	if (table) {
		for (i = 0; i < table->count; i++)
			ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
	}
}

static void ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(struct radeon_device *rdev,
								   struct radeon_phase_shedding_limits_table *table)
{
	u32 i;

	if (table) {
		for (i = 0; i < table->count; i++)
			ci_patch_with_vddc_leakage(rdev, &table->entries[i].voltage);
	}
}

static void ci_patch_clock_voltage_limits_with_vddc_leakage(struct radeon_device *rdev,
							    struct radeon_clock_and_voltage_limits *table)
{
	if (table) {
		ci_patch_with_vddc_leakage(rdev, (u16 *)&table->vddc);
		ci_patch_with_vddci_leakage(rdev, (u16 *)&table->vddci);
	}
}

static void ci_patch_cac_leakage_table_with_vddc_leakage(struct radeon_device *rdev,
							 struct radeon_cac_leakage_table *table)
{
	u32 i;

	if (table) {
		for (i = 0; i < table->count; i++)
			ci_patch_with_vddc_leakage(rdev, &table->entries[i].vddc);
	}
}

static void ci_patch_dependency_tables_with_leakage(struct radeon_device *rdev)
{

	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
								  &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
								  &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
								  &rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk);
	ci_patch_clock_voltage_dependency_table_with_vddci_leakage(rdev,
								   &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk);
	ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(rdev,
								      &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table);
	ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(rdev,
								      &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table);
	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
								  &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table);
	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
								  &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table);
	ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(rdev,
							       &rdev->pm.dpm.dyn_state.phase_shedding_limits_table);
	ci_patch_clock_voltage_limits_with_vddc_leakage(rdev,
							&rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
	ci_patch_clock_voltage_limits_with_vddc_leakage(rdev,
							&rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc);
	ci_patch_cac_leakage_table_with_vddc_leakage(rdev,
						     &rdev->pm.dpm.dyn_state.cac_leakage_table);

}

static void ci_get_memory_type(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 tmp;

	tmp = RREG32(MC_SEQ_MISC0);

	if (((tmp & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT) ==
	    MC_SEQ_MISC0_GDDR5_VALUE)
		pi->mem_gddr5 = true;
	else
		pi->mem_gddr5 = false;

}

static void ci_update_current_ps(struct radeon_device *rdev,
				 struct radeon_ps *rps)
{
	struct ci_ps *new_ps = ci_get_ps(rps);
	struct ci_power_info *pi = ci_get_pi(rdev);

	pi->current_rps = *rps;
	pi->current_ps = *new_ps;
	pi->current_rps.ps_priv = &pi->current_ps;
}

static void ci_update_requested_ps(struct radeon_device *rdev,
				   struct radeon_ps *rps)
{
	struct ci_ps *new_ps = ci_get_ps(rps);
	struct ci_power_info *pi = ci_get_pi(rdev);

	pi->requested_rps = *rps;
	pi->requested_ps = *new_ps;
	pi->requested_rps.ps_priv = &pi->requested_ps;
}

int ci_dpm_pre_set_power_state(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps;
	struct radeon_ps *new_ps = &requested_ps;

	ci_update_requested_ps(rdev, new_ps);

	ci_apply_state_adjust_rules(rdev, &pi->requested_rps);

	return 0;
}

void ci_dpm_post_set_power_state(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_ps *new_ps = &pi->requested_rps;

	ci_update_current_ps(rdev, new_ps);
}


void ci_dpm_setup_asic(struct radeon_device *rdev)
{
	int r;

	r = ci_mc_load_microcode(rdev);
	if (r)
		DRM_ERROR("Failed to load MC firmware!\n");
	ci_read_clock_registers(rdev);
	ci_get_memory_type(rdev);
	ci_enable_acpi_power_management(rdev);
	ci_init_sclk_t(rdev);
}

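/*
 * Full DPM bring-up sequence: voltage control and tables, MC register
 * table, spread spectrum and thermal protection, SMC firmware upload,
 * SMC/ARB table initialization, then start the SMC and enable ULV,
 * deep sleep, DPM, DIDT, CAC and power containment.  Fails with
 * -EINVAL if the SMC is already running.
 */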
int ci_dpm_enable(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
	int ret;

	if (ci_is_smc_running(rdev))
		return -EINVAL;
	if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
		ci_enable_voltage_control(rdev);
		ret = ci_construct_voltage_tables(rdev);
		if (ret) {
			DRM_ERROR("ci_construct_voltage_tables failed\n");
			return ret;
		}
	}
	if (pi->caps_dynamic_ac_timing) {
		ret = ci_initialize_mc_reg_table(rdev);
		if (ret)
			pi->caps_dynamic_ac_timing = false;
	}
	if (pi->dynamic_ss)
		ci_enable_spread_spectrum(rdev, true);
	if (pi->thermal_protection)
		ci_enable_thermal_protection(rdev, true);
	ci_program_sstp(rdev);
	ci_enable_display_gap(rdev);
	ci_program_vc(rdev);
	ret = ci_upload_firmware(rdev);
	if (ret) {
		DRM_ERROR("ci_upload_firmware failed\n");
		return ret;
	}
	ret = ci_process_firmware_header(rdev);
	if (ret) {
		DRM_ERROR("ci_process_firmware_header failed\n");
		return ret;
	}
	ret = ci_initial_switch_from_arb_f0_to_f1(rdev);
	if (ret) {
		DRM_ERROR("ci_initial_switch_from_arb_f0_to_f1 failed\n");
		return ret;
	}
	ret = ci_init_smc_table(rdev);
	if (ret) {
		DRM_ERROR("ci_init_smc_table failed\n");
		return ret;
	}
	ret = ci_init_arb_table_index(rdev);
	if (ret) {
		DRM_ERROR("ci_init_arb_table_index failed\n");
		return ret;
	}
	if (pi->caps_dynamic_ac_timing) {
		ret = ci_populate_initial_mc_reg_table(rdev);
		if (ret) {
			DRM_ERROR("ci_populate_initial_mc_reg_table failed\n");
			return ret;
		}
	}
	ret = ci_populate_pm_base(rdev);
	if (ret) {
		DRM_ERROR("ci_populate_pm_base failed\n");
		return ret;
	}
	ci_dpm_start_smc(rdev);
	ci_enable_vr_hot_gpio_interrupt(rdev);
	ret = ci_notify_smc_display_change(rdev, false);
	if (ret) {
		DRM_ERROR("ci_notify_smc_display_change failed\n");
		return ret;
	}
	ci_enable_sclk_control(rdev, true);
	ret = ci_enable_ulv(rdev, true);
	if (ret) {
		DRM_ERROR("ci_enable_ulv failed\n");
		return ret;
	}
	ret = ci_enable_ds_master_switch(rdev, true);
	if (ret) {
		DRM_ERROR("ci_enable_ds_master_switch failed\n");
		return ret;
	}
	ret = ci_start_dpm(rdev);
	if (ret) {
		DRM_ERROR("ci_start_dpm failed\n");
		return ret;
	}
	ret = ci_enable_didt(rdev, true);
	if (ret) {
		DRM_ERROR("ci_enable_didt failed\n");
		return ret;
	}
	ret = ci_enable_smc_cac(rdev, true);
	if (ret) {
		DRM_ERROR("ci_enable_smc_cac failed\n");
		return ret;
	}
	ret = ci_enable_power_containment(rdev, true);
	if (ret) {
		DRM_ERROR("ci_enable_power_containment failed\n");
		return ret;
	}

	ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);

	ci_update_current_ps(rdev, boot_ps);

	return 0;
}

int ci_dpm_late_enable(struct radeon_device *rdev)
{
	int ret;

	if (rdev->irq.installed &&
	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
#if 0
		PPSMC_Result result;
#endif
		ret = ci_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
		if (ret) {
			DRM_ERROR("ci_set_thermal_temperature_range failed\n");
			return ret;
		}
		rdev->irq.dpm_thermal = true;
		radeon_irq_set(rdev);
#if 0
		result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt);

		if (result != PPSMC_Result_OK)
			DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
#endif
	}

	ci_dpm_powergate_uvd(rdev, true);

	return 0;
}

void ci_dpm_disable(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;

	ci_dpm_powergate_uvd(rdev, false);

	if (!ci_is_smc_running(rdev))
		return;

	if (pi->thermal_protection)
		ci_enable_thermal_protection(rdev, false);
	ci_enable_power_containment(rdev, false);
	ci_enable_smc_cac(rdev, false);
	ci_enable_didt(rdev, false);
	ci_enable_spread_spectrum(rdev, false);
	ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
	ci_stop_dpm(rdev);
	ci_enable_ds_master_switch(rdev, true);
	ci_enable_ulv(rdev, false);
	ci_clear_vc(rdev);
	ci_reset_to_default(rdev);
	ci_dpm_stop_smc(rdev);
	ci_force_switch_to_arb_f0(rdev);

	ci_update_current_ps(rdev, boot_ps);
}

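/*
 * Apply the requested power state: freeze sclk/mclk DPM, re-upload any
 * changed DPM levels and MC registers, regenerate and upload the level
 * enable masks, update VCE DPM and memory timings, then unfreeze and,
 * if requested, notify the platform of a PCIe link speed change.
 */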
int ci_dpm_set_power_state(struct radeon_device *rdev)
4768
{
4769
	struct ci_power_info *pi = ci_get_pi(rdev);
4770
	struct radeon_ps *new_ps = &pi->requested_rps;
4771
	struct radeon_ps *old_ps = &pi->current_rps;
4772
	int ret;
4773
 
4774
	ci_find_dpm_states_clocks_in_dpm_table(rdev, new_ps);
4775
	if (pi->pcie_performance_request)
4776
		ci_request_link_speed_change_before_state_change(rdev, new_ps, old_ps);
4777
	ret = ci_freeze_sclk_mclk_dpm(rdev);
4778
	if (ret) {
4779
		DRM_ERROR("ci_freeze_sclk_mclk_dpm failed\n");
4780
		return ret;
4781
	}
4782
	ret = ci_populate_and_upload_sclk_mclk_dpm_levels(rdev, new_ps);
4783
	if (ret) {
4784
		DRM_ERROR("ci_populate_and_upload_sclk_mclk_dpm_levels failed\n");
4785
		return ret;
4786
	}
4787
	ret = ci_generate_dpm_level_enable_mask(rdev, new_ps);
4788
	if (ret) {
4789
		DRM_ERROR("ci_generate_dpm_level_enable_mask failed\n");
4790
		return ret;
4791
	}
4792
 
4793
	ret = ci_update_vce_dpm(rdev, new_ps, old_ps);
4794
	if (ret) {
4795
		DRM_ERROR("ci_update_vce_dpm failed\n");
4796
		return ret;
4797
	}
4798
 
4799
	ret = ci_update_sclk_t(rdev);
4800
	if (ret) {
4801
		DRM_ERROR("ci_update_sclk_t failed\n");
4802
		return ret;
4803
	}
4804
	if (pi->caps_dynamic_ac_timing) {
4805
		ret = ci_update_and_upload_mc_reg_table(rdev);
4806
		if (ret) {
4807
			DRM_ERROR("ci_update_and_upload_mc_reg_table failed\n");
4808
			return ret;
4809
		}
4810
	}
4811
	ret = ci_program_memory_timing_parameters(rdev);
4812
	if (ret) {
4813
		DRM_ERROR("ci_program_memory_timing_parameters failed\n");
4814
		return ret;
4815
	}
4816
	ret = ci_unfreeze_sclk_mclk_dpm(rdev);
4817
	if (ret) {
4818
		DRM_ERROR("ci_unfreeze_sclk_mclk_dpm failed\n");
4819
		return ret;
4820
	}
4821
	ret = ci_upload_dpm_level_enable_mask(rdev);
4822
	if (ret) {
4823
		DRM_ERROR("ci_upload_dpm_level_enable_mask failed\n");
4824
		return ret;
4825
	}
4826
	if (pi->pcie_performance_request)
4827
		ci_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps);
4828
 
4829
	return 0;
4830
}
4831
 
4832
int ci_dpm_power_control_set_level(struct radeon_device *rdev)
4833
{
4834
	return ci_power_control_set_level(rdev);
4835
}
4836
 
4837
void ci_dpm_reset_asic(struct radeon_device *rdev)
4838
{
4839
	ci_set_boot_state(rdev);
4840
}
4841
 
4842
void ci_dpm_display_configuration_changed(struct radeon_device *rdev)
4843
{
4844
	ci_program_display_gap(rdev);
4845
}
4846
 
4847
union power_info {
4848
	struct _ATOM_POWERPLAY_INFO info;
4849
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
4850
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
4851
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
4852
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
4853
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
4854
};
4855
 
4856
union pplib_clock_info {
4857
	struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
4858
	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
4859
	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
4860
	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
4861
	struct _ATOM_PPLIB_SI_CLOCK_INFO si;
4862
	struct _ATOM_PPLIB_CI_CLOCK_INFO ci;
4863
};
4864
 
4865
union pplib_power_state {
4866
	struct _ATOM_PPLIB_STATE v1;
4867
	struct _ATOM_PPLIB_STATE_V2 v2;
4868
};
4869
 
4870
static void ci_parse_pplib_non_clock_info(struct radeon_device *rdev,
4871
					  struct radeon_ps *rps,
4872
					  struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
4873
					  u8 table_rev)
4874
{
4875
	rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
4876
	rps->class = le16_to_cpu(non_clock_info->usClassification);
4877
	rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
4878
 
4879
	if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
4880
		rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
4881
		rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
4882
	} else {
4883
		rps->vclk = 0;
4884
		rps->dclk = 0;
4885
	}
4886
 
4887
	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
4888
		rdev->pm.dpm.boot_ps = rps;
4889
	if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
4890
		rdev->pm.dpm.uvd_ps = rps;
4891
}
4892
 
4893
static void ci_parse_pplib_clock_info(struct radeon_device *rdev,
				      struct radeon_ps *rps, int index,
				      union pplib_clock_info *clock_info)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct ci_ps *ps = ci_get_ps(rps);
	struct ci_pl *pl = &ps->performance_levels[index];

	ps->performance_level_count = index + 1;

	pl->sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
	pl->sclk |= clock_info->ci.ucEngineClockHigh << 16;
	pl->mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
	pl->mclk |= clock_info->ci.ucMemoryClockHigh << 16;

	pl->pcie_gen = r600_get_pcie_gen_support(rdev,
						 pi->sys_pcie_mask,
						 pi->vbios_boot_state.pcie_gen_bootup_value,
						 clock_info->ci.ucPCIEGen);
	pl->pcie_lane = r600_get_pcie_lane_support(rdev,
						   pi->vbios_boot_state.pcie_lane_bootup_value,
						   le16_to_cpu(clock_info->ci.usPCIELane));

	if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
		pi->acpi_pcie_gen = pl->pcie_gen;
	}

	if (rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) {
		pi->ulv.supported = true;
		pi->ulv.pl = *pl;
		pi->ulv.cg_ulv_parameter = CISLANDS_CGULVPARAMETER_DFLT;
	}

	/* patch up boot state */
	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
		pl->mclk = pi->vbios_boot_state.mclk_bootup_value;
		pl->sclk = pi->vbios_boot_state.sclk_bootup_value;
		pl->pcie_gen = pi->vbios_boot_state.pcie_gen_bootup_value;
		pl->pcie_lane = pi->vbios_boot_state.pcie_lane_bootup_value;
	}

	switch (rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
		pi->use_pcie_powersaving_levels = true;
		if (pi->pcie_gen_powersaving.max < pl->pcie_gen)
			pi->pcie_gen_powersaving.max = pl->pcie_gen;
		if (pi->pcie_gen_powersaving.min > pl->pcie_gen)
			pi->pcie_gen_powersaving.min = pl->pcie_gen;
		if (pi->pcie_lane_powersaving.max < pl->pcie_lane)
			pi->pcie_lane_powersaving.max = pl->pcie_lane;
		if (pi->pcie_lane_powersaving.min > pl->pcie_lane)
			pi->pcie_lane_powersaving.min = pl->pcie_lane;
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
		pi->use_pcie_performance_levels = true;
		if (pi->pcie_gen_performance.max < pl->pcie_gen)
			pi->pcie_gen_performance.max = pl->pcie_gen;
		if (pi->pcie_gen_performance.min > pl->pcie_gen)
			pi->pcie_gen_performance.min = pl->pcie_gen;
		if (pi->pcie_lane_performance.max < pl->pcie_lane)
			pi->pcie_lane_performance.max = pl->pcie_lane;
		if (pi->pcie_lane_performance.min > pl->pcie_lane)
			pi->pcie_lane_performance.min = pl->pcie_lane;
		break;
	default:
		break;
	}
}

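/*
 * Walk the PPLib state array and build the driver-side power state
 * list.  Each v2 state entry is two header bytes plus one clock-info
 * index per DPM level, which is why the offset advances by
 * 2 + ucNumDPMLevels per iteration.
 */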
static int ci_parse_power_table(struct radeon_device *rdev)
{
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
	union pplib_power_state *power_state;
	int i, j, k, non_clock_array_index, clock_array_index;
	union pplib_clock_info *clock_info;
	struct _StateArray *state_array;
	struct _ClockInfoArray *clock_info_array;
	struct _NonClockInfoArray *non_clock_info_array;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	u8 *power_state_offset;
	struct ci_ps *ps;

	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	state_array = (struct _StateArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usStateArrayOffset));
	clock_info_array = (struct _ClockInfoArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
	non_clock_info_array = (struct _NonClockInfoArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));

	rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) *
				  state_array->ucNumEntries, GFP_KERNEL);
	if (!rdev->pm.dpm.ps)
		return -ENOMEM;
	power_state_offset = (u8 *)state_array->states;
	for (i = 0; i < state_array->ucNumEntries; i++) {
		u8 *idx;
		power_state = (union pplib_power_state *)power_state_offset;
		non_clock_array_index = power_state->v2.nonClockInfoIndex;
		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
			&non_clock_info_array->nonClockInfo[non_clock_array_index];
		if (!rdev->pm.power_state[i].clock_info)
			return -EINVAL;
		ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL);
		if (ps == NULL) {
			kfree(rdev->pm.dpm.ps);
			return -ENOMEM;
		}
		rdev->pm.dpm.ps[i].ps_priv = ps;
		ci_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
					      non_clock_info,
					      non_clock_info_array->ucEntrySize);
		k = 0;
		idx = (u8 *)&power_state->v2.clockInfoIndex[0];
		for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
			clock_array_index = idx[j];
			if (clock_array_index >= clock_info_array->ucNumEntries)
				continue;
			if (k >= CISLANDS_MAX_HARDWARE_POWERLEVELS)
				break;
			clock_info = (union pplib_clock_info *)
				((u8 *)&clock_info_array->clockInfo[0] +
				 (clock_array_index * clock_info_array->ucEntrySize));
			ci_parse_pplib_clock_info(rdev,
						  &rdev->pm.dpm.ps[i], k,
						  clock_info);
			k++;
		}
		power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
	}
	rdev->pm.dpm.num_ps = state_array->ucNumEntries;

	/* fill in the vce power states */
	for (i = 0; i < RADEON_MAX_VCE_LEVELS; i++) {
		u32 sclk, mclk;
		clock_array_index = rdev->pm.dpm.vce_states[i].clk_idx;
		clock_info = (union pplib_clock_info *)
			&clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
		sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
		sclk |= clock_info->ci.ucEngineClockHigh << 16;
		mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
		mclk |= clock_info->ci.ucMemoryClockHigh << 16;
		rdev->pm.dpm.vce_states[i].sclk = sclk;
		rdev->pm.dpm.vce_states[i].mclk = mclk;
	}

	return 0;
}

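/*
 * Snapshot the clocks, voltages and PCIe configuration the VBIOS
 * programmed at boot; these values back the boot state and serve as
 * fallbacks elsewhere in the driver.
 */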
static int ci_get_vbios_boot_values(struct radeon_device *rdev,
				    struct ci_vbios_boot_state *boot_state)
{
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
	ATOM_FIRMWARE_INFO_V2_2 *firmware_info;
	u8 frev, crev;
	u16 data_offset;

	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset)) {
		firmware_info =
			(ATOM_FIRMWARE_INFO_V2_2 *)(mode_info->atom_context->bios +
						    data_offset);
		boot_state->mvdd_bootup_value = le16_to_cpu(firmware_info->usBootUpMVDDCVoltage);
		boot_state->vddc_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCVoltage);
		boot_state->vddci_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCIVoltage);
		boot_state->pcie_gen_bootup_value = ci_get_current_pcie_speed(rdev);
		boot_state->pcie_lane_bootup_value = ci_get_current_pcie_lane_number(rdev);
		boot_state->sclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultEngineClock);
		boot_state->mclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultMemoryClock);

		return 0;
	}
	return -EINVAL;
}

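/* Free everything ci_dpm_init() allocated, including the per-state
 * private data. */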
void ci_dpm_fini(struct radeon_device *rdev)
{
	int i;

	for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
		kfree(rdev->pm.dpm.ps[i].ps_priv);
	}
	kfree(rdev->pm.dpm.ps);
	kfree(rdev->pm.dpm.priv);
	kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
	r600_free_extended_power_table(rdev);
}

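/*
 * One-time dpm setup: allocate the power info, parse the VBIOS tables,
 * and fill in the Sea Islands defaults and feature caps.
 */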
int ci_dpm_init(struct radeon_device *rdev)
{
	int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
	u16 data_offset, size;
	u8 frev, crev;
	struct ci_power_info *pi;
	int ret;
	u32 mask;

	pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL);
	if (pi == NULL)
		return -ENOMEM;
	rdev->pm.dpm.priv = pi;

	ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
	if (ret)
		pi->sys_pcie_mask = 0;
	else
		pi->sys_pcie_mask = mask;
	pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;

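	/*
	 * Seed the min/max trackers inverted (max at the lowest value,
	 * min at the highest) so the first parsed state overwrites them.
	 */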
	pi->pcie_gen_performance.max = RADEON_PCIE_GEN1;
	pi->pcie_gen_performance.min = RADEON_PCIE_GEN3;
	pi->pcie_gen_powersaving.max = RADEON_PCIE_GEN1;
	pi->pcie_gen_powersaving.min = RADEON_PCIE_GEN3;

	pi->pcie_lane_performance.max = 0;
	pi->pcie_lane_performance.min = 16;
	pi->pcie_lane_powersaving.max = 0;
	pi->pcie_lane_powersaving.min = 16;

	ret = ci_get_vbios_boot_values(rdev, &pi->vbios_boot_state);
	if (ret) {
		ci_dpm_fini(rdev);
		return ret;
	}

	ret = r600_get_platform_caps(rdev);
	if (ret) {
		ci_dpm_fini(rdev);
		return ret;
	}

	ret = r600_parse_extended_power_table(rdev);
	if (ret) {
		ci_dpm_fini(rdev);
		return ret;
	}

	ret = ci_parse_power_table(rdev);
	if (ret) {
		ci_dpm_fini(rdev);
		return ret;
	}

	pi->dll_default_on = false;
	pi->sram_end = SMC_RAM_END;

	pi->activity_target[0] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[1] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[2] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[3] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[4] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[5] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[6] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[7] = CISLAND_TARGETACTIVITY_DFLT;

	pi->mclk_activity_target = CISLAND_MCLK_TARGETACTIVITY_DFLT;

	pi->sclk_dpm_key_disabled = 0;
	pi->mclk_dpm_key_disabled = 0;
	pi->pcie_dpm_key_disabled = 0;

	/* mclk dpm is unstable on some R7 260X cards with the old mc ucode */
	if ((rdev->pdev->device == 0x6658) &&
	    (rdev->mc_fw->size == (BONAIRE_MC_UCODE_SIZE * 4))) {
		pi->mclk_dpm_key_disabled = 1;
	}

	pi->caps_sclk_ds = true;

	pi->mclk_strobe_mode_threshold = 40000;
	pi->mclk_stutter_mode_threshold = 40000;
	pi->mclk_edc_enable_threshold = 40000;
	pi->mclk_edc_wr_enable_threshold = 40000;

	ci_initialize_powertune_defaults(rdev);

	pi->caps_fps = false;

	pi->caps_sclk_throttle_low_notification = false;

	pi->caps_uvd_dpm = true;
	pi->caps_vce_dpm = true;

	ci_get_leakage_voltages(rdev);
	ci_patch_dependency_tables_with_leakage(rdev);
	ci_set_private_data_variables_based_on_pptable(rdev);

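	/*
	 * Hardcoded vddc vs. dispclk dependency table: four steps from 0
	 * up to 72000 (assumed to be 10 kHz units, i.e. 720 MHz, with
	 * voltages in mV, matching the convention used elsewhere in the
	 * radeon dpm code).
	 */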
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
		kzalloc(4 * sizeof(struct radeon_clock_voltage_dependency_entry), GFP_KERNEL);
	if (!rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
		ci_dpm_fini(rdev);
		return -ENOMEM;
	}
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;

	rdev->pm.dpm.dyn_state.mclk_sclk_ratio = 4;
	rdev->pm.dpm.dyn_state.sclk_mclk_delta = 15000;
	rdev->pm.dpm.dyn_state.vddc_vddci_delta = 200;

	rdev->pm.dpm.dyn_state.valid_sclk_values.count = 0;
	rdev->pm.dpm.dyn_state.valid_sclk_values.values = NULL;
	rdev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
	rdev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;

	if (rdev->family == CHIP_HAWAII) {
		pi->thermal_temp_setting.temperature_low = 94500;
		pi->thermal_temp_setting.temperature_high = 95000;
		pi->thermal_temp_setting.temperature_shutdown = 104000;
	} else {
		pi->thermal_temp_setting.temperature_low = 99500;
		pi->thermal_temp_setting.temperature_high = 100000;
		pi->thermal_temp_setting.temperature_shutdown = 104000;
	}

	pi->uvd_enabled = false;

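	/*
	 * Detect how each rail is controlled: prefer a GPIO LUT, fall
	 * back to SVID2.  For VDDCI and MVDD the platform cap is cleared
	 * when neither method is available.
	 */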
	pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_NONE;
	pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_NONE;
	pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_NONE;
	if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT))
		pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
	else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
		pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;

	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL) {
		if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
			pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
		else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
			pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
		else
			rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL;
	}

	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_MVDDCONTROL) {
		if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
			pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
		else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
			pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
		else
			rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_MVDDCONTROL;
	}

	pi->vddc_phase_shed_control = true;

#if defined(CONFIG_ACPI)
	pi->pcie_performance_request =
		radeon_acpi_is_pcie_performance_request_supported(rdev);
#else
	pi->pcie_performance_request = false;
#endif

	if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
				   &frev, &crev, &data_offset)) {
		pi->caps_sclk_ss_support = true;
		pi->caps_mclk_ss_support = true;
		pi->dynamic_ss = true;
	} else {
		pi->caps_sclk_ss_support = false;
		pi->caps_mclk_ss_support = false;
		pi->dynamic_ss = true;
	}

	if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
		pi->thermal_protection = true;
	else
		pi->thermal_protection = false;

	pi->caps_dynamic_ac_timing = true;

	pi->uvd_power_gated = false;

	/* make sure dc limits are valid */
	if ((rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
	    (rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
		rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
			rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;

	return 0;
}

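/* debugfs hook: report the average engine and memory clocks as queried
 * from the SMC. */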
void ci_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
						    struct seq_file *m)
{
	u32 sclk = ci_get_average_sclk_freq(rdev);
	u32 mclk = ci_get_average_mclk_freq(rdev);

	seq_printf(m, "power level avg    sclk: %u mclk: %u\n",
		   sclk, mclk);
}

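/* Dump a power state's classification, caps, UVD clocks and per-level
 * settings to the kernel log. */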
void ci_dpm_print_power_state(struct radeon_device *rdev,
			      struct radeon_ps *rps)
{
	struct ci_ps *ps = ci_get_ps(rps);
	struct ci_pl *pl;
	int i;

	r600_dpm_print_class_info(rps->class, rps->class2);
	r600_dpm_print_cap_info(rps->caps);
	printk("\tuvd    vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
	for (i = 0; i < ps->performance_level_count; i++) {
		pl = &ps->performance_levels[i];
		printk("\t\tpower level %d    sclk: %u mclk: %u pcie gen: %u pcie lanes: %u\n",
		       i, pl->sclk, pl->mclk, pl->pcie_gen + 1, pl->pcie_lane);
	}
	r600_dpm_print_ps_status(rdev, rps);
}

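/*
 * For the requested state, "low" returns the first performance level's
 * clock, otherwise the highest-indexed level's.
 */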
u32 ci_dpm_get_sclk(struct radeon_device *rdev, bool low)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);

	if (low)
		return requested_state->performance_levels[0].sclk;
	else
		return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
}

u32 ci_dpm_get_mclk(struct radeon_device *rdev, bool low)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);

	if (low)
		return requested_state->performance_levels[0].mclk;
	else
		return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
}