/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include "drmP.h"
#include "radeon.h"
#include "r600d.h"
#include "r600_dpm.h"
#include "atom.h"
 
const u32 r600_utc[R600_PM_NUMBER_OF_TC] =
{
	R600_UTC_DFLT_00,
	R600_UTC_DFLT_01,
	R600_UTC_DFLT_02,
	R600_UTC_DFLT_03,
	R600_UTC_DFLT_04,
	R600_UTC_DFLT_05,
	R600_UTC_DFLT_06,
	R600_UTC_DFLT_07,
	R600_UTC_DFLT_08,
	R600_UTC_DFLT_09,
	R600_UTC_DFLT_10,
	R600_UTC_DFLT_11,
	R600_UTC_DFLT_12,
	R600_UTC_DFLT_13,
	R600_UTC_DFLT_14,
};

const u32 r600_dtc[R600_PM_NUMBER_OF_TC] =
{
	R600_DTC_DFLT_00,
	R600_DTC_DFLT_01,
	R600_DTC_DFLT_02,
	R600_DTC_DFLT_03,
	R600_DTC_DFLT_04,
	R600_DTC_DFLT_05,
	R600_DTC_DFLT_06,
	R600_DTC_DFLT_07,
	R600_DTC_DFLT_08,
	R600_DTC_DFLT_09,
	R600_DTC_DFLT_10,
	R600_DTC_DFLT_11,
	R600_DTC_DFLT_12,
	R600_DTC_DFLT_13,
	R600_DTC_DFLT_14,
};

void r600_dpm_print_class_info(u32 class, u32 class2)
{
	printk("\tui class: ");
	switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
	case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
	default:
		printk("none\n");
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
		printk("battery\n");
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
		printk("balanced\n");
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
		printk("performance\n");
		break;
	}
	printk("\tinternal class: ");
	if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
	    (class2 == 0))
		printk("none");
	else {
		if (class & ATOM_PPLIB_CLASSIFICATION_BOOT)
			printk("boot ");
		if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
			printk("thermal ");
		if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
			printk("limited_pwr ");
		if (class & ATOM_PPLIB_CLASSIFICATION_REST)
			printk("rest ");
		if (class & ATOM_PPLIB_CLASSIFICATION_FORCED)
			printk("forced ");
		if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
			printk("3d_perf ");
		if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
			printk("ovrdrv ");
		if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
			printk("uvd ");
		if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW)
			printk("3d_low ");
		if (class & ATOM_PPLIB_CLASSIFICATION_ACPI)
			printk("acpi ");
		if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
			printk("uvd_hd2 ");
		if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
			printk("uvd_hd ");
		if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
			printk("uvd_sd ");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
			printk("limited_pwr2 ");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
			printk("ulv ");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
			printk("uvd_mvc ");
	}
	printk("\n");
}

void r600_dpm_print_cap_info(u32 caps)
{
	printk("\tcaps: ");
	if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
		printk("single_disp ");
	if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK)
		printk("video ");
	if (caps & ATOM_PPLIB_DISALLOW_ON_DC)
		printk("no_dc ");
	printk("\n");
}

void r600_dpm_print_ps_status(struct radeon_device *rdev,
			      struct radeon_ps *rps)
{
	printk("\tstatus: ");
	if (rps == rdev->pm.dpm.current_ps)
		printk("c ");
	if (rps == rdev->pm.dpm.requested_ps)
		printk("r ");
	if (rps == rdev->pm.dpm.boot_ps)
		printk("b ");
	printk("\n");
}
 
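/*
 * Report the vblank interval of the first active CRTC, in microseconds.
 * Returns 0xffffffff when no display is active so that callers treat the
 * blanking window as unlimited (presumably for reclocking decisions that
 * have to fit inside vblank).
 */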
u32 r600_dpm_get_vblank_time(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 line_time_us, vblank_lines;
	u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */

	if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			radeon_crtc = to_radeon_crtc(crtc);
			if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
				line_time_us = (radeon_crtc->hw_mode.crtc_htotal * 1000) /
					radeon_crtc->hw_mode.clock;
				vblank_lines = radeon_crtc->hw_mode.crtc_vblank_end -
					radeon_crtc->hw_mode.crtc_vdisplay +
					(radeon_crtc->v_border * 2);
				vblank_time_us = vblank_lines * line_time_us;
				break;
			}
		}
	}

	return vblank_time_us;
}

u32 r600_dpm_get_vrefresh(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 vrefresh = 0;

	if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			radeon_crtc = to_radeon_crtc(crtc);
			if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
				vrefresh = radeon_crtc->hw_mode.vrefresh;
				break;
			}
		}
	}
	return vrefresh;
}
 
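/*
 * Decompose the scaled interval i_c = (i * r_c) / 100 into an exponent *u
 * and a mantissa *p with i_c ~= *p << (2 * *u): b_c counts how many bits
 * of i_c lie above the p_b-bit field, and *u rounds that up to an even
 * shift so that *p fits the register field.
 */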
void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
			    u32 *p, u32 *u)
{
	u32 b_c = 0;
	u32 i_c;
	u32 tmp;

	i_c = (i * r_c) / 100;
	tmp = i_c >> p_b;

	while (tmp) {
		b_c++;
		tmp >>= 1;
	}

	*u = (b_c + 1) / 2;
	*p = i_c / (1 << (2 * (*u)));
}
 
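/*
 * Derive the low/high activity thresholds (*tl, *th) around the target t.
 * k is the fh:fl clock ratio in percent; the hysteresis h is scaled by
 * that ratio in fixed point (note the +5/10 and +5000/10000 rounding
 * steps) and then split into an upper part ah and a lower part al.
 * Fails with -EINVAL for a zero or inverted fl/fh pair.
 */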
int r600_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th)
{
	u32 k, a, ah, al;
	u32 t1;

	if ((fl == 0) || (fh == 0) || (fl > fh))
		return -EINVAL;

	k = (100 * fh) / fl;
	t1 = (t * (k - 100));
	a = (1000 * (100 * h + t1)) / (10000 + (t1 / 100));
	a = (a + 5) / 10;
	ah = ((a * t) + 5000) / 10000;
	al = a - ah;

	*th = t - ah;
	*tl = t + al;

	return 0;
}
 
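/*
 * Toggle dynamic gfx clock gating.  On disable, handshake with the RLC
 * through CG_RLC_REQ_AND_RSP (issue request 0x2, poll for response type 1,
 * then clear the request) before forcing GRBM power on; the trailing read
 * of GRBM_PWR_CNTL appears to be there to post the write.
 */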
void r600_gfx_clockgating_enable(struct radeon_device *rdev, bool enable)
{
	int i;

	if (enable) {
		WREG32_P(SCLK_PWRMGT_CNTL, DYN_GFX_CLK_OFF_EN, ~DYN_GFX_CLK_OFF_EN);
	} else {
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_GFX_CLK_OFF_EN);

		WREG32(CG_RLC_REQ_AND_RSP, 0x2);

		for (i = 0; i < rdev->usec_timeout; i++) {
			if (((RREG32(CG_RLC_REQ_AND_RSP) & CG_RLC_RSP_TYPE_MASK) >> CG_RLC_RSP_TYPE_SHIFT) == 1)
				break;
			udelay(1);
		}

		WREG32(CG_RLC_REQ_AND_RSP, 0x0);

		WREG32(GRBM_PWR_CNTL, 0x1);
		RREG32(GRBM_PWR_CNTL);
	}
}

void r600_dynamicpm_enable(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32_P(GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, ~GLOBAL_PWRMGT_EN);
	else
		WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN);
}

void r600_enable_thermal_protection(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS);
	else
		WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS);
}

void r600_enable_acpi_pm(struct radeon_device *rdev)
{
	WREG32_P(GENERAL_PWRMGT, STATIC_PM_EN, ~STATIC_PM_EN);
}

void r600_enable_dynamic_pcie_gen2(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32_P(GENERAL_PWRMGT, ENABLE_GEN2PCIE, ~ENABLE_GEN2PCIE);
	else
		WREG32_P(GENERAL_PWRMGT, 0, ~ENABLE_GEN2PCIE);
}

bool r600_dynamicpm_enabled(struct radeon_device *rdev)
{
	if (RREG32(GENERAL_PWRMGT) & GLOBAL_PWRMGT_EN)
		return true;
	else
		return false;
}

void r600_enable_sclk_control(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_OFF);
	else
		WREG32_P(SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF);
}

void r600_enable_mclk_control(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32_P(MCLK_PWRMGT_CNTL, 0, ~MPLL_PWRMGT_OFF);
	else
		WREG32_P(MCLK_PWRMGT_CNTL, MPLL_PWRMGT_OFF, ~MPLL_PWRMGT_OFF);
}

void r600_enable_spll_bypass(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32_P(CG_SPLL_FUNC_CNTL, SPLL_BYPASS_EN, ~SPLL_BYPASS_EN);
	else
		WREG32_P(CG_SPLL_FUNC_CNTL, 0, ~SPLL_BYPASS_EN);
}

void r600_wait_for_spll_change(struct radeon_device *rdev)
{
	int i;

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(CG_SPLL_FUNC_CNTL) & SPLL_CHG_STATUS)
			break;
		udelay(1);
	}
}

void r600_set_bsp(struct radeon_device *rdev, u32 u, u32 p)
{
	WREG32(CG_BSP, BSP(p) | BSU(u));
}

void r600_set_at(struct radeon_device *rdev,
		 u32 l_to_m, u32 m_to_h,
		 u32 h_to_m, u32 m_to_l)
{
	WREG32(CG_RT, FLS(l_to_m) | FMS(m_to_h));
	WREG32(CG_LT, FHS(h_to_m) | FMS(m_to_l));
}

void r600_set_tc(struct radeon_device *rdev,
		 u32 index, u32 u_t, u32 d_t)
{
	WREG32(CG_FFCT_0 + (index * 4), UTC_0(u_t) | DTC_0(d_t));
}

void r600_select_td(struct radeon_device *rdev,
		    enum r600_td td)
{
	if (td == R600_TD_AUTO)
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_FORCE_TREND_SEL);
	else
		WREG32_P(SCLK_PWRMGT_CNTL, FIR_FORCE_TREND_SEL, ~FIR_FORCE_TREND_SEL);
	if (td == R600_TD_UP)
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_TREND_MODE);
	if (td == R600_TD_DOWN)
		WREG32_P(SCLK_PWRMGT_CNTL, FIR_TREND_MODE, ~FIR_TREND_MODE);
}

void r600_set_vrc(struct radeon_device *rdev, u32 vrv)
{
	WREG32(CG_FTV, vrv);
}

void r600_set_tpu(struct radeon_device *rdev, u32 u)
{
	WREG32_P(CG_TPC, TPU(u), ~TPU_MASK);
}

void r600_set_tpc(struct radeon_device *rdev, u32 c)
{
	WREG32_P(CG_TPC, TPCC(c), ~TPCC_MASK);
}

void r600_set_sstu(struct radeon_device *rdev, u32 u)
{
	WREG32_P(CG_SSP, CG_SSTU(u), ~CG_SSTU_MASK);
}

void r600_set_sst(struct radeon_device *rdev, u32 t)
{
	WREG32_P(CG_SSP, CG_SST(t), ~CG_SST_MASK);
}

void r600_set_git(struct radeon_device *rdev, u32 t)
{
	WREG32_P(CG_GIT, CG_GICST(t), ~CG_GICST_MASK);
}

void r600_set_fctu(struct radeon_device *rdev, u32 u)
{
	WREG32_P(CG_FC_T, FC_TU(u), ~FC_TU_MASK);
}

void r600_set_fct(struct radeon_device *rdev, u32 t)
{
	WREG32_P(CG_FC_T, FC_T(t), ~FC_T_MASK);
}

void r600_set_ctxcgtt3d_rphc(struct radeon_device *rdev, u32 p)
{
	WREG32_P(CG_CTX_CGTT3D_R, PHC(p), ~PHC_MASK);
}

void r600_set_ctxcgtt3d_rsdc(struct radeon_device *rdev, u32 s)
{
	WREG32_P(CG_CTX_CGTT3D_R, SDC(s), ~SDC_MASK);
}

void r600_set_vddc3d_oorsu(struct radeon_device *rdev, u32 u)
{
	WREG32_P(CG_VDDC3D_OOR, SU(u), ~SU_MASK);
}

void r600_set_vddc3d_oorphc(struct radeon_device *rdev, u32 p)
{
	WREG32_P(CG_VDDC3D_OOR, PHC(p), ~PHC_MASK);
}

void r600_set_vddc3d_oorsdc(struct radeon_device *rdev, u32 s)
{
	WREG32_P(CG_VDDC3D_OOR, SDC(s), ~SDC_MASK);
}

void r600_set_mpll_lock_time(struct radeon_device *rdev, u32 lock_time)
{
	WREG32_P(MPLL_TIME, MPLL_LOCK_TIME(lock_time), ~MPLL_LOCK_TIME_MASK);
}

void r600_set_mpll_reset_time(struct radeon_device *rdev, u32 reset_time)
{
	WREG32_P(MPLL_TIME, MPLL_RESET_TIME(reset_time), ~MPLL_RESET_TIME_MASK);
}

void r600_engine_clock_entry_enable(struct radeon_device *rdev,
				    u32 index, bool enable)
{
	if (enable)
		WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
			 STEP_0_SPLL_ENTRY_VALID, ~STEP_0_SPLL_ENTRY_VALID);
	else
		WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
			 0, ~STEP_0_SPLL_ENTRY_VALID);
}

void r600_engine_clock_entry_enable_pulse_skipping(struct radeon_device *rdev,
						   u32 index, bool enable)
{
	if (enable)
		WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
			 STEP_0_SPLL_STEP_ENABLE, ~STEP_0_SPLL_STEP_ENABLE);
	else
		WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
			 0, ~STEP_0_SPLL_STEP_ENABLE);
}

void r600_engine_clock_entry_enable_post_divider(struct radeon_device *rdev,
						 u32 index, bool enable)
{
	if (enable)
		WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
			 STEP_0_POST_DIV_EN, ~STEP_0_POST_DIV_EN);
	else
		WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
			 0, ~STEP_0_POST_DIV_EN);
}

void r600_engine_clock_entry_set_post_divider(struct radeon_device *rdev,
					      u32 index, u32 divider)
{
	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
		 STEP_0_SPLL_POST_DIV(divider), ~STEP_0_SPLL_POST_DIV_MASK);
}

void r600_engine_clock_entry_set_reference_divider(struct radeon_device *rdev,
						   u32 index, u32 divider)
{
	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
		 STEP_0_SPLL_REF_DIV(divider), ~STEP_0_SPLL_REF_DIV_MASK);
}

void r600_engine_clock_entry_set_feedback_divider(struct radeon_device *rdev,
						  u32 index, u32 divider)
{
	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
		 STEP_0_SPLL_FB_DIV(divider), ~STEP_0_SPLL_FB_DIV_MASK);
}

void r600_engine_clock_entry_set_step_time(struct radeon_device *rdev,
					   u32 index, u32 step_time)
{
	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
		 STEP_0_SPLL_STEP_TIME(step_time), ~STEP_0_SPLL_STEP_TIME_MASK);
}

void r600_vid_rt_set_ssu(struct radeon_device *rdev, u32 u)
{
	WREG32_P(VID_RT, SSTU(u), ~SSTU_MASK);
}

void r600_vid_rt_set_vru(struct radeon_device *rdev, u32 u)
{
	WREG32_P(VID_RT, VID_CRTU(u), ~VID_CRTU_MASK);
}

void r600_vid_rt_set_vrt(struct radeon_device *rdev, u32 rt)
{
	WREG32_P(VID_RT, VID_CRT(rt), ~VID_CRT_MASK);
}

void r600_voltage_control_enable_pins(struct radeon_device *rdev,
				      u64 mask)
{
	WREG32(LOWER_GPIO_ENABLE, mask & 0xffffffff);
	WREG32(UPPER_GPIO_ENABLE, upper_32_bits(mask));
}
 
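/*
 * Program the voltage GPIO pattern for one power level.  The per-level
 * register instances appear to be laid out in reverse order of the
 * r600_power_level enum, hence ix = 3 - (3 & index) here and in the
 * helpers below; the low 32 pin bits go to the per-level
 * CTXSW_VID_LOWER_GPIO_CNTL and the remaining three bits to a per-level
 * field of the shared VID_UPPER_GPIO_CNTL register.
 */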
void r600_voltage_control_program_voltages(struct radeon_device *rdev,
					   enum r600_power_level index, u64 pins)
{
	u32 tmp, mask;
	u32 ix = 3 - (3 & index);

	WREG32(CTXSW_VID_LOWER_GPIO_CNTL + (ix * 4), pins & 0xffffffff);

	mask = 7 << (3 * ix);
	tmp = RREG32(VID_UPPER_GPIO_CNTL);
	tmp = (tmp & ~mask) | ((pins >> (32 - (3 * ix))) & mask);
	WREG32(VID_UPPER_GPIO_CNTL, tmp);
}

void r600_voltage_control_deactivate_static_control(struct radeon_device *rdev,
						    u64 mask)
{
	u32 gpio;

	gpio = RREG32(GPIOPAD_MASK);
	gpio &= ~mask;
	WREG32(GPIOPAD_MASK, gpio);

	gpio = RREG32(GPIOPAD_EN);
	gpio &= ~mask;
	WREG32(GPIOPAD_EN, gpio);

	gpio = RREG32(GPIOPAD_A);
	gpio &= ~mask;
	WREG32(GPIOPAD_A, gpio);
}

void r600_power_level_enable(struct radeon_device *rdev,
			     enum r600_power_level index, bool enable)
{
	u32 ix = 3 - (3 & index);

	if (enable)
		WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), CTXSW_FREQ_STATE_ENABLE,
			 ~CTXSW_FREQ_STATE_ENABLE);
	else
		WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), 0,
			 ~CTXSW_FREQ_STATE_ENABLE);
}

void r600_power_level_set_voltage_index(struct radeon_device *rdev,
					enum r600_power_level index, u32 voltage_index)
{
	u32 ix = 3 - (3 & index);

	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4),
		 CTXSW_FREQ_VIDS_CFG_INDEX(voltage_index), ~CTXSW_FREQ_VIDS_CFG_INDEX_MASK);
}

void r600_power_level_set_mem_clock_index(struct radeon_device *rdev,
					  enum r600_power_level index, u32 mem_clock_index)
{
	u32 ix = 3 - (3 & index);

	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4),
		 CTXSW_FREQ_MCLK_CFG_INDEX(mem_clock_index), ~CTXSW_FREQ_MCLK_CFG_INDEX_MASK);
}

void r600_power_level_set_eng_clock_index(struct radeon_device *rdev,
					  enum r600_power_level index, u32 eng_clock_index)
{
	u32 ix = 3 - (3 & index);

	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4),
		 CTXSW_FREQ_SCLK_CFG_INDEX(eng_clock_index), ~CTXSW_FREQ_SCLK_CFG_INDEX_MASK);
}

void r600_power_level_set_watermark_id(struct radeon_device *rdev,
				       enum r600_power_level index,
				       enum r600_display_watermark watermark_id)
{
	u32 ix = 3 - (3 & index);
	u32 tmp = 0;

	if (watermark_id == R600_DISPLAY_WATERMARK_HIGH)
		tmp = CTXSW_FREQ_DISPLAY_WATERMARK;
	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), tmp, ~CTXSW_FREQ_DISPLAY_WATERMARK);
}

void r600_power_level_set_pcie_gen2(struct radeon_device *rdev,
				    enum r600_power_level index, bool compatible)
{
	u32 ix = 3 - (3 & index);
	u32 tmp = 0;

	if (compatible)
		tmp = CTXSW_FREQ_GEN2PCIE_VOLT;
	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), tmp, ~CTXSW_FREQ_GEN2PCIE_VOLT);
}

enum r600_power_level r600_power_level_get_current_index(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK;
	tmp >>= CURRENT_PROFILE_INDEX_SHIFT;
	return tmp;
}

enum r600_power_level r600_power_level_get_target_index(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & TARGET_PROFILE_INDEX_MASK;
	tmp >>= TARGET_PROFILE_INDEX_SHIFT;
	return tmp;
}

void r600_power_level_set_enter_index(struct radeon_device *rdev,
				      enum r600_power_level index)
{
	WREG32_P(TARGET_AND_CURRENT_PROFILE_INDEX, DYN_PWR_ENTER_INDEX(index),
		 ~DYN_PWR_ENTER_INDEX_MASK);
}

void r600_wait_for_power_level_unequal(struct radeon_device *rdev,
				       enum r600_power_level index)
{
	int i;

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (r600_power_level_get_target_index(rdev) != index)
			break;
		udelay(1);
	}

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (r600_power_level_get_current_index(rdev) != index)
			break;
		udelay(1);
	}
}

void r600_wait_for_power_level(struct radeon_device *rdev,
			       enum r600_power_level index)
{
	int i;

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (r600_power_level_get_target_index(rdev) == index)
			break;
		udelay(1);
	}

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (r600_power_level_get_current_index(rdev) == index)
			break;
		udelay(1);
	}
}
 
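/*
 * Bring up dynamic power management: disable sclk/mclk power-management
 * control, enable GLOBAL_PWRMGT, wait a vblank on both CRTCs, cycle the
 * SPLL in and out of bypass twice to let it settle, then re-enable
 * sclk/mclk control.
 */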
void r600_start_dpm(struct radeon_device *rdev)
{
	r600_enable_sclk_control(rdev, false);
	r600_enable_mclk_control(rdev, false);

	r600_dynamicpm_enable(rdev, true);

	radeon_wait_for_vblank(rdev, 0);
	radeon_wait_for_vblank(rdev, 1);

	r600_enable_spll_bypass(rdev, true);
	r600_wait_for_spll_change(rdev);
	r600_enable_spll_bypass(rdev, false);
	r600_wait_for_spll_change(rdev);

	r600_enable_spll_bypass(rdev, true);
	r600_wait_for_spll_change(rdev);
	r600_enable_spll_bypass(rdev, false);
	r600_wait_for_spll_change(rdev);

	r600_enable_sclk_control(rdev, true);
	r600_enable_mclk_control(rdev, true);
}

void r600_stop_dpm(struct radeon_device *rdev)
{
	r600_dynamicpm_enable(rdev, false);
}

int r600_dpm_pre_set_power_state(struct radeon_device *rdev)
{
	return 0;
}

void r600_dpm_post_set_power_state(struct radeon_device *rdev)
{

}

bool r600_is_uvd_state(u32 class, u32 class2)
{
	if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
		return true;
	if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
		return true;
	return false;
}
 
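/*
 * Clamp the requested trip points to the 0..255 degree window the
 * DIG_THERM fields can encode (temperatures are in millidegrees C) and
 * program the thermal interrupt high/low thresholds plus the DPM trip
 * point.
 */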
static int r600_set_thermal_temperature_range(struct radeon_device *rdev,
					      int min_temp, int max_temp)
{
	int low_temp = 0 * 1000;
	int high_temp = 255 * 1000;

	if (low_temp < min_temp)
		low_temp = min_temp;
	if (high_temp > max_temp)
		high_temp = max_temp;
	if (high_temp < low_temp) {
		DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
		return -EINVAL;
	}

	WREG32_P(CG_THERMAL_INT, DIG_THERM_INTH(high_temp / 1000), ~DIG_THERM_INTH_MASK);
	WREG32_P(CG_THERMAL_INT, DIG_THERM_INTL(low_temp / 1000), ~DIG_THERM_INTL_MASK);
	WREG32_P(CG_THERMAL_CTRL, DIG_THERM_DPM(high_temp / 1000), ~DIG_THERM_DPM_MASK);

	rdev->pm.dpm.thermal.min_temp = low_temp;
	rdev->pm.dpm.thermal.max_temp = high_temp;

	return 0;
}

bool r600_is_internal_thermal_sensor(enum radeon_int_thermal_type sensor)
{
	switch (sensor) {
	case THERMAL_TYPE_RV6XX:
	case THERMAL_TYPE_RV770:
	case THERMAL_TYPE_EVERGREEN:
	case THERMAL_TYPE_SUMO:
	case THERMAL_TYPE_NI:
	case THERMAL_TYPE_SI:
	case THERMAL_TYPE_CI:
	case THERMAL_TYPE_KV:
		return true;
	case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
	case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
		return false; /* need special handling */
	case THERMAL_TYPE_NONE:
	case THERMAL_TYPE_EXTERNAL:
	case THERMAL_TYPE_EXTERNAL_GPIO:
	default:
		return false;
	}
}

int r600_dpm_late_enable(struct radeon_device *rdev)
{
	int ret;

	if (rdev->irq.installed &&
	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
		ret = r600_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
		if (ret)
			return ret;
		rdev->irq.dpm_thermal = true;
		radeon_irq_set(rdev);
	}

	return 0;
}
 
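/*
 * The PowerPlayInfo ATOM table has shipped in several revisions;
 * overlaying them in a union lets the parsers below pick the layout that
 * matches the table size the VBIOS reports.
 */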
union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
	struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
	struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
};

union fan_info {
	struct _ATOM_PPLIB_FANTABLE fan;
	struct _ATOM_PPLIB_FANTABLE2 fan2;
};
 
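/*
 * Copy one ATOM clock/voltage dependency table into driver form,
 * reassembling each clock from its packed 16-bit low and 8-bit high
 * halves.  The caller frees the allocated entries on any later error.
 */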
static int r600_parse_clk_voltage_dep_table(struct radeon_clock_voltage_dependency_table *radeon_table,
					    ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table)
{
	u32 size = atom_table->ucNumEntries *
		sizeof(struct radeon_clock_voltage_dependency_entry);
	int i;
	ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;

	radeon_table->entries = kzalloc(size, GFP_KERNEL);
	if (!radeon_table->entries)
		return -ENOMEM;

	entry = &atom_table->entries[0];
	for (i = 0; i < atom_table->ucNumEntries; i++) {
		radeon_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
			(entry->ucClockHigh << 16);
		radeon_table->entries[i].v = le16_to_cpu(entry->usVoltage);
		entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
			((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record));
	}
	radeon_table->count = atom_table->ucNumEntries;

	return 0;
}

int r600_get_platform_caps(struct radeon_device *rdev)
{
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;

	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
	rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
	rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);

	return 0;
}

/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
 
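/*
 * Walk the optional PPLib sub-tables (fan, clock/voltage dependencies,
 * phase shedding limits, CAC/TDP data, and the extended-header VCE, UVD,
 * SAMU, PPM, ACP and PowerTune tables), allocating driver-side copies of
 * each table that is present.  Allocation failures unwind whatever has
 * already been parsed before returning -ENOMEM.
 */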
int r600_parse_extended_power_table(struct radeon_device *rdev)
{
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	union power_info *power_info;
	union fan_info *fan_info;
	ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	int ret, i;

	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	/* fan table */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		if (power_info->pplib3.usFanTableOffset) {
			fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset +
						      le16_to_cpu(power_info->pplib3.usFanTableOffset));
			rdev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst;
			rdev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin);
			rdev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed);
			rdev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh);
			rdev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin);
			rdev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed);
			rdev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh);
			if (fan_info->fan.ucFanTableFormat >= 2)
				rdev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax);
			else
				rdev->pm.dpm.fan.t_max = 10900;
			rdev->pm.dpm.fan.cycle_delay = 100000;
			rdev->pm.dpm.fan.ucode_fan_control = true;
		}
	}

	/* clock dependency tables, shedding tables */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) {
		if (power_info->pplib4.usVddcDependencyOnSCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
			ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
							       dep_table);
			if (ret)
				return ret;
		}
		if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
			ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
							       dep_table);
			if (ret) {
				kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
				return ret;
			}
		}
		if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
			ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
							       dep_table);
			if (ret) {
				kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
				kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
				return ret;
			}
		}
		if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
			ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
							       dep_table);
			if (ret) {
				kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
				kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
				kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries);
				return ret;
			}
		}
		if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
			ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
				(ATOM_PPLIB_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset));
			if (clk_v->ucNumEntries) {
				rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk =
					le16_to_cpu(clk_v->entries[0].usSclkLow) |
					(clk_v->entries[0].ucSclkHigh << 16);
				rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk =
					le16_to_cpu(clk_v->entries[0].usMclkLow) |
					(clk_v->entries[0].ucMclkHigh << 16);
				rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc =
					le16_to_cpu(clk_v->entries[0].usVddc);
				rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci =
					le16_to_cpu(clk_v->entries[0].usVddci);
			}
		}
		if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) {
			ATOM_PPLIB_PhaseSheddingLimits_Table *psl =
				(ATOM_PPLIB_PhaseSheddingLimits_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));
			ATOM_PPLIB_PhaseSheddingLimits_Record *entry;

			rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
				kzalloc(psl->ucNumEntries *
					sizeof(struct radeon_phase_shedding_limits_entry),
					GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}

			entry = &psl->entries[0];
			for (i = 0; i < psl->ucNumEntries; i++) {
				rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
					le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16);
				rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
					le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16);
				rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record));
			}
			rdev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
				psl->ucNumEntries;
		}
	}

	/* cac data */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) {
		rdev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit);
		rdev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit);
		rdev->pm.dpm.near_tdp_limit_adjusted = rdev->pm.dpm.near_tdp_limit;
		rdev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit);
		if (rdev->pm.dpm.tdp_od_limit)
			rdev->pm.dpm.power_control = true;
		else
			rdev->pm.dpm.power_control = false;
		rdev->pm.dpm.tdp_adjustment = 0;
		rdev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold);
		rdev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage);
		rdev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope);
		if (power_info->pplib5.usCACLeakageTableOffset) {
			ATOM_PPLIB_CAC_Leakage_Table *cac_table =
				(ATOM_PPLIB_CAC_Leakage_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
			ATOM_PPLIB_CAC_Leakage_Record *entry;
			u32 size = cac_table->ucNumEntries * sizeof(struct radeon_cac_leakage_table);
			rdev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.cac_leakage_table.entries) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			entry = &cac_table->entries[0];
			for (i = 0; i < cac_table->ucNumEntries; i++) {
				if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 =
						le16_to_cpu(entry->usVddc1);
					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 =
						le16_to_cpu(entry->usVddc2);
					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 =
						le16_to_cpu(entry->usVddc3);
				} else {
					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
						le16_to_cpu(entry->usVddc);
					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
						le32_to_cpu(entry->ulLeakageValue);
				}
				entry = (ATOM_PPLIB_CAC_Leakage_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record));
			}
			rdev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
		}
	}

	/* ext tables */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
			(mode_info->atom_context->bios + data_offset +
			 le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) &&
			ext_hdr->usVCETableOffset) {
			VCEClockInfoArray *array = (VCEClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
				 1 + array->ucNumEntries * sizeof(VCEClockInfo));
			ATOM_PPLIB_VCE_State_Table *states =
				(ATOM_PPLIB_VCE_State_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
				 1 + (array->ucNumEntries * sizeof (VCEClockInfo)) +
				 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)));
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry;
			ATOM_PPLIB_VCE_State_Record *state_entry;
			VCEClockInfo *vce_clk;
			u32 size = limits->numEntries *
				sizeof(struct radeon_vce_clock_voltage_dependency_entry);
			rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			state_entry = &states->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				vce_clk = (VCEClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
				rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
				rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk =
					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
				rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
			}
			for (i = 0; i < states->numEntries; i++) {
				if (i >= RADEON_MAX_VCE_LEVELS)
					break;
				vce_clk = (VCEClockInfo *)
					((u8 *)&array->entries[0] +
					 (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
				rdev->pm.dpm.vce_states[i].evclk =
					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
				rdev->pm.dpm.vce_states[i].ecclk =
					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
				rdev->pm.dpm.vce_states[i].clk_idx =
					state_entry->ucClockInfoIndex & 0x3f;
				rdev->pm.dpm.vce_states[i].pstate =
					(state_entry->ucClockInfoIndex & 0xc0) >> 6;
				state_entry = (ATOM_PPLIB_VCE_State_Record *)
					((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) &&
			ext_hdr->usUVDTableOffset) {
			UVDClockInfoArray *array = (UVDClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1);
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 +
				 1 + (array->ucNumEntries * sizeof (UVDClockInfo)));
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct radeon_uvd_clock_voltage_dependency_entry);
			rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				UVDClockInfo *uvd_clk = (UVDClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo)));
				rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk =
					le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16);
				rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
					le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
				rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) &&
			ext_hdr->usSAMUTableOffset) {
			ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_SAMClk_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1);
			ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct radeon_clock_voltage_dependency_entry);
			rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16);
				rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
		    ext_hdr->usPPMTableOffset) {
			ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usPPMTableOffset));
			rdev->pm.dpm.dyn_state.ppm_table =
				kzalloc(sizeof(struct radeon_ppm_table), GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.ppm_table) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			rdev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
			rdev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
				le16_to_cpu(ppm->usCpuCoreNumber);
			rdev->pm.dpm.dyn_state.ppm_table->platform_tdp =
				le32_to_cpu(ppm->ulPlatformTDP);
			rdev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp =
				le32_to_cpu(ppm->ulSmallACPlatformTDP);
			rdev->pm.dpm.dyn_state.ppm_table->platform_tdc =
				le32_to_cpu(ppm->ulPlatformTDC);
			rdev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc =
				le32_to_cpu(ppm->ulSmallACPlatformTDC);
			rdev->pm.dpm.dyn_state.ppm_table->apu_tdp =
				le32_to_cpu(ppm->ulApuTDP);
			rdev->pm.dpm.dyn_state.ppm_table->dgpu_tdp =
				le32_to_cpu(ppm->ulDGpuTDP);
			rdev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power =
				le32_to_cpu(ppm->ulDGpuUlvPower);
			rdev->pm.dpm.dyn_state.ppm_table->tj_max =
				le32_to_cpu(ppm->ulTjmax);
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) &&
			ext_hdr->usACPTableOffset) {
			ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_ACPClk_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usACPTableOffset) + 1);
			ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct radeon_clock_voltage_dependency_entry);
			rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16);
				rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) &&
			ext_hdr->usPowerTuneTableOffset) {
			u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
			ATOM_PowerTune_Table *pt;
			rdev->pm.dpm.dyn_state.cac_tdp_table =
				kzalloc(sizeof(struct radeon_cac_tdp_table), GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.cac_tdp_table) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			if (rev > 0) {
				ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				rdev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
					ppt->usMaximumPowerDeliveryLimit;
				pt = &ppt->power_tune_table;
			} else {
				ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				rdev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255;
				pt = &ppt->power_tune_table;
			}
			rdev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP);
			rdev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp =
				le16_to_cpu(pt->usConfigurableTDP);
			rdev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC);
			rdev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit =
				le16_to_cpu(pt->usBatteryPowerLimit);
			rdev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit =
				le16_to_cpu(pt->usSmallPowerLimit);
			rdev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage =
				le16_to_cpu(pt->usLowCACLeakage);
			rdev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage =
				le16_to_cpu(pt->usHighCACLeakage);
		}
	}

	return 0;
}

void r600_free_extended_power_table(struct radeon_device *rdev)
{
	struct radeon_dpm_dynamic_state *dyn_state = &rdev->pm.dpm.dyn_state;

	kfree(dyn_state->vddc_dependency_on_sclk.entries);
	kfree(dyn_state->vddci_dependency_on_mclk.entries);
	kfree(dyn_state->vddc_dependency_on_mclk.entries);
	kfree(dyn_state->mvdd_dependency_on_mclk.entries);
	kfree(dyn_state->cac_leakage_table.entries);
	kfree(dyn_state->phase_shedding_limits_table.entries);
	kfree(dyn_state->ppm_table);
	kfree(dyn_state->cac_tdp_table);
	kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
	kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
	kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
	kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
}
 
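/*
 * Pick the PCIE gen for a state: an explicit asic_gen wins outright;
 * otherwise fall back to the fastest speed that both the system mask and
 * the requested default allow, bottoming out at gen1.
 */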
enum radeon_pcie_gen r600_get_pcie_gen_support(struct radeon_device *rdev,
					       u32 sys_mask,
					       enum radeon_pcie_gen asic_gen,
					       enum radeon_pcie_gen default_gen)
{
	switch (asic_gen) {
	case RADEON_PCIE_GEN1:
		return RADEON_PCIE_GEN1;
	case RADEON_PCIE_GEN2:
		return RADEON_PCIE_GEN2;
	case RADEON_PCIE_GEN3:
		return RADEON_PCIE_GEN3;
	default:
		if ((sys_mask & DRM_PCIE_SPEED_80) && (default_gen == RADEON_PCIE_GEN3))
			return RADEON_PCIE_GEN3;
		else if ((sys_mask & DRM_PCIE_SPEED_50) && (default_gen == RADEON_PCIE_GEN2))
			return RADEON_PCIE_GEN2;
		else
			return RADEON_PCIE_GEN1;
	}
	return RADEON_PCIE_GEN1;
}

u16 r600_get_pcie_lane_support(struct radeon_device *rdev,
			       u16 asic_lanes,
			       u16 default_lanes)
{
	switch (asic_lanes) {
	case 0:
	default:
		return default_lanes;
	case 1:
		return 1;
	case 2:
		return 2;
	case 4:
		return 4;
	case 8:
		return 8;
	case 12:
		return 12;
	case 16:
		return 16;
	}
}
 
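/*
 * Encode a PCIE lane count (1/2/4/8/12/16) into the packed value the
 * hardware expects; unsupported widths, including anything above 16,
 * encode to 0.
 */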
1349
u8 r600_encode_pci_lane_width(u32 lanes)
1350
{
1351
	u8 encoded_lanes[] = { 0, 1, 2, 0, 3, 0, 0, 0, 4, 0, 0, 0, 5, 0, 0, 0, 6 };
1352
 
1353
	if (lanes > 16)
1354
		return 0;
1355
 
1356
	return encoded_lanes[lanes];
1357
}