/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "r600d.h"
#include "r600_dpm.h"
#include "atom.h"

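/*
 * Default up/down time-constant tables; r600_set_tc() below programs one
 * (UTC, DTC) pair per step into the CG_FFCT_n registers.
 */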
const u32 r600_utc[R600_PM_NUMBER_OF_TC] =
{
	R600_UTC_DFLT_00,
	R600_UTC_DFLT_01,
	R600_UTC_DFLT_02,
	R600_UTC_DFLT_03,
	R600_UTC_DFLT_04,
	R600_UTC_DFLT_05,
	R600_UTC_DFLT_06,
	R600_UTC_DFLT_07,
	R600_UTC_DFLT_08,
	R600_UTC_DFLT_09,
	R600_UTC_DFLT_10,
	R600_UTC_DFLT_11,
	R600_UTC_DFLT_12,
	R600_UTC_DFLT_13,
	R600_UTC_DFLT_14,
};

const u32 r600_dtc[R600_PM_NUMBER_OF_TC] =
{
	R600_DTC_DFLT_00,
	R600_DTC_DFLT_01,
	R600_DTC_DFLT_02,
	R600_DTC_DFLT_03,
	R600_DTC_DFLT_04,
	R600_DTC_DFLT_05,
	R600_DTC_DFLT_06,
	R600_DTC_DFLT_07,
	R600_DTC_DFLT_08,
	R600_DTC_DFLT_09,
	R600_DTC_DFLT_10,
	R600_DTC_DFLT_11,
	R600_DTC_DFLT_12,
	R600_DTC_DFLT_13,
	R600_DTC_DFLT_14,
};

void r600_dpm_print_class_info(u32 class, u32 class2)
{
	printk("\tui class: ");
	switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
	case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
	default:
		printk("none\n");
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
		printk("battery\n");
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
		printk("balanced\n");
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
		printk("performance\n");
		break;
	}
	printk("\tinternal class: ");
	if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
	    (class2 == 0))
		printk("none");
	else {
		if (class & ATOM_PPLIB_CLASSIFICATION_BOOT)
			printk("boot ");
		if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
			printk("thermal ");
		if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
			printk("limited_pwr ");
		if (class & ATOM_PPLIB_CLASSIFICATION_REST)
			printk("rest ");
		if (class & ATOM_PPLIB_CLASSIFICATION_FORCED)
			printk("forced ");
		if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
			printk("3d_perf ");
		if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
			printk("ovrdrv ");
		if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
			printk("uvd ");
		if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW)
			printk("3d_low ");
		if (class & ATOM_PPLIB_CLASSIFICATION_ACPI)
			printk("acpi ");
		if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
			printk("uvd_hd2 ");
		if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
			printk("uvd_hd ");
		if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
			printk("uvd_sd ");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
			printk("limited_pwr2 ");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
			printk("ulv ");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
			printk("uvd_mvc ");
	}
	printk("\n");
}

void r600_dpm_print_cap_info(u32 caps)
{
	printk("\tcaps: ");
	if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
		printk("single_disp ");
	if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK)
		printk("video ");
	if (caps & ATOM_PPLIB_DISALLOW_ON_DC)
		printk("no_dc ");
	printk("\n");
}

void r600_dpm_print_ps_status(struct radeon_device *rdev,
			      struct radeon_ps *rps)
{
	printk("\tstatus: ");
	if (rps == rdev->pm.dpm.current_ps)
		printk("c ");
	if (rps == rdev->pm.dpm.requested_ps)
		printk("r ");
	if (rps == rdev->pm.dpm.boot_ps)
		printk("b ");
	printk("\n");
}

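/*
 * Returns the vblank interval of the first active CRTC, in microseconds.
 * hw_mode.clock is in kHz, so vblank_in_pixels * 1000 / clock yields us.
 * If no display is enabled, returns 0xffffffff (vblank time is unbounded).
 */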
u32 r600_dpm_get_vblank_time(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 vblank_in_pixels;
	u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */

	if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			radeon_crtc = to_radeon_crtc(crtc);
			if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
				vblank_in_pixels =
					radeon_crtc->hw_mode.crtc_htotal *
					(radeon_crtc->hw_mode.crtc_vblank_end -
					 radeon_crtc->hw_mode.crtc_vdisplay +
					 (radeon_crtc->v_border * 2));

				vblank_time_us = vblank_in_pixels * 1000 / radeon_crtc->hw_mode.clock;
				break;
			}
		}
	}

	return vblank_time_us;
}

u32 r600_dpm_get_vrefresh(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 vrefresh = 0;

	if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			radeon_crtc = to_radeon_crtc(crtc);
			if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
				vrefresh = drm_mode_vrefresh(&radeon_crtc->hw_mode);
				break;
			}
		}
	}
	return vrefresh;
}

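/*
 * Derive the (p, u) register pair from a percentage i of range r_c:
 * i_c = i * r_c / 100; u is half (rounded up) of the number of
 * significant bits of i_c above p_b, so that p = i_c / 4^u fits within
 * roughly p_b bits.
 */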
void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
			    u32 *p, u32 *u)
{
	u32 b_c = 0;
	u32 i_c;
	u32 tmp;

	i_c = (i * r_c) / 100;
	tmp = i_c >> p_b;

	while (tmp) {
		b_c++;
		tmp >>= 1;
	}

	*u = (b_c + 1) / 2;
	*p = i_c / (1 << (2 * (*u)));
}

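/*
 * Given a window t, weight h and clock pair (fl, fh), derive asymmetric
 * offsets ah/al from the ratio k = 100 * fh / fl and return the shifted
 * bounds *th = t - ah and *tl = t + al. Fails with -EINVAL if fl or fh
 * is zero or fl > fh.
 */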
int r600_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th)
{
	u32 k, a, ah, al;
	u32 t1;

	if ((fl == 0) || (fh == 0) || (fl > fh))
		return -EINVAL;

	k = (100 * fh) / fl;
	t1 = (t * (k - 100));
	a = (1000 * (100 * h + t1)) / (10000 + (t1 / 100));
	a = (a + 5) / 10;
	ah = ((a * t) + 5000) / 10000;
	al = a - ah;

	*th = t - ah;
	*tl = t + al;

	return 0;
}

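/*
 * Dynamic gfx clock gating. Disabling is more than a register toggle:
 * after clearing DYN_GFX_CLK_OFF_EN the code handshakes with the RLC via
 * CG_RLC_REQ_AND_RSP (request 0x2, poll for response type 1) and then
 * forces the GRBM power state on through GRBM_PWR_CNTL.
 */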
void r600_gfx_clockgating_enable(struct radeon_device *rdev, bool enable)
{
	int i;

	if (enable) {
		WREG32_P(SCLK_PWRMGT_CNTL, DYN_GFX_CLK_OFF_EN, ~DYN_GFX_CLK_OFF_EN);
	} else {
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_GFX_CLK_OFF_EN);

		WREG32(CG_RLC_REQ_AND_RSP, 0x2);

		for (i = 0; i < rdev->usec_timeout; i++) {
			if (((RREG32(CG_RLC_REQ_AND_RSP) & CG_RLC_RSP_TYPE_MASK) >> CG_RLC_RSP_TYPE_SHIFT) == 1)
				break;
			udelay(1);
		}

		WREG32(CG_RLC_REQ_AND_RSP, 0x0);

		WREG32(GRBM_PWR_CNTL, 0x1);
		RREG32(GRBM_PWR_CNTL);
	}
}

void r600_dynamicpm_enable(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32_P(GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, ~GLOBAL_PWRMGT_EN);
	else
		WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN);
}

void r600_enable_thermal_protection(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS);
	else
		WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS);
}

void r600_enable_acpi_pm(struct radeon_device *rdev)
{
	WREG32_P(GENERAL_PWRMGT, STATIC_PM_EN, ~STATIC_PM_EN);
}

void r600_enable_dynamic_pcie_gen2(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32_P(GENERAL_PWRMGT, ENABLE_GEN2PCIE, ~ENABLE_GEN2PCIE);
	else
		WREG32_P(GENERAL_PWRMGT, 0, ~ENABLE_GEN2PCIE);
}

bool r600_dynamicpm_enabled(struct radeon_device *rdev)
{
	if (RREG32(GENERAL_PWRMGT) & GLOBAL_PWRMGT_EN)
		return true;
	else
		return false;
}

void r600_enable_sclk_control(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_OFF);
	else
		WREG32_P(SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF);
}

void r600_enable_mclk_control(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32_P(MCLK_PWRMGT_CNTL, 0, ~MPLL_PWRMGT_OFF);
	else
		WREG32_P(MCLK_PWRMGT_CNTL, MPLL_PWRMGT_OFF, ~MPLL_PWRMGT_OFF);
}

void r600_enable_spll_bypass(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32_P(CG_SPLL_FUNC_CNTL, SPLL_BYPASS_EN, ~SPLL_BYPASS_EN);
	else
		WREG32_P(CG_SPLL_FUNC_CNTL, 0, ~SPLL_BYPASS_EN);
}

void r600_wait_for_spll_change(struct radeon_device *rdev)
{
	int i;

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(CG_SPLL_FUNC_CNTL) & SPLL_CHG_STATUS)
			break;
		udelay(1);
	}
}

void r600_set_bsp(struct radeon_device *rdev, u32 u, u32 p)
{
	WREG32(CG_BSP, BSP(p) | BSU(u));
}

void r600_set_at(struct radeon_device *rdev,
		 u32 l_to_m, u32 m_to_h,
		 u32 h_to_m, u32 m_to_l)
{
	WREG32(CG_RT, FLS(l_to_m) | FMS(m_to_h));
	WREG32(CG_LT, FHS(h_to_m) | FMS(m_to_l));
}

void r600_set_tc(struct radeon_device *rdev,
		 u32 index, u32 u_t, u32 d_t)
{
	WREG32(CG_FFCT_0 + (index * 4), UTC_0(u_t) | DTC_0(d_t));
}

void r600_select_td(struct radeon_device *rdev,
		    enum r600_td td)
{
	if (td == R600_TD_AUTO)
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_FORCE_TREND_SEL);
	else
		WREG32_P(SCLK_PWRMGT_CNTL, FIR_FORCE_TREND_SEL, ~FIR_FORCE_TREND_SEL);
	if (td == R600_TD_UP)
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_TREND_MODE);
	if (td == R600_TD_DOWN)
		WREG32_P(SCLK_PWRMGT_CNTL, FIR_TREND_MODE, ~FIR_TREND_MODE);
}

void r600_set_vrc(struct radeon_device *rdev, u32 vrv)
{
	WREG32(CG_FTV, vrv);
}

void r600_set_tpu(struct radeon_device *rdev, u32 u)
{
	WREG32_P(CG_TPC, TPU(u), ~TPU_MASK);
}

void r600_set_tpc(struct radeon_device *rdev, u32 c)
{
	WREG32_P(CG_TPC, TPCC(c), ~TPCC_MASK);
}

void r600_set_sstu(struct radeon_device *rdev, u32 u)
{
	WREG32_P(CG_SSP, CG_SSTU(u), ~CG_SSTU_MASK);
}

void r600_set_sst(struct radeon_device *rdev, u32 t)
{
	WREG32_P(CG_SSP, CG_SST(t), ~CG_SST_MASK);
}

void r600_set_git(struct radeon_device *rdev, u32 t)
{
	WREG32_P(CG_GIT, CG_GICST(t), ~CG_GICST_MASK);
}

void r600_set_fctu(struct radeon_device *rdev, u32 u)
{
	WREG32_P(CG_FC_T, FC_TU(u), ~FC_TU_MASK);
}

void r600_set_fct(struct radeon_device *rdev, u32 t)
{
	WREG32_P(CG_FC_T, FC_T(t), ~FC_T_MASK);
}

void r600_set_ctxcgtt3d_rphc(struct radeon_device *rdev, u32 p)
{
	WREG32_P(CG_CTX_CGTT3D_R, PHC(p), ~PHC_MASK);
}

void r600_set_ctxcgtt3d_rsdc(struct radeon_device *rdev, u32 s)
{
	WREG32_P(CG_CTX_CGTT3D_R, SDC(s), ~SDC_MASK);
}

void r600_set_vddc3d_oorsu(struct radeon_device *rdev, u32 u)
{
	WREG32_P(CG_VDDC3D_OOR, SU(u), ~SU_MASK);
}

void r600_set_vddc3d_oorphc(struct radeon_device *rdev, u32 p)
{
	WREG32_P(CG_VDDC3D_OOR, PHC(p), ~PHC_MASK);
}

void r600_set_vddc3d_oorsdc(struct radeon_device *rdev, u32 s)
{
	WREG32_P(CG_VDDC3D_OOR, SDC(s), ~SDC_MASK);
}

void r600_set_mpll_lock_time(struct radeon_device *rdev, u32 lock_time)
{
	WREG32_P(MPLL_TIME, MPLL_LOCK_TIME(lock_time), ~MPLL_LOCK_TIME_MASK);
}

void r600_set_mpll_reset_time(struct radeon_device *rdev, u32 reset_time)
{
	WREG32_P(MPLL_TIME, MPLL_RESET_TIME(reset_time), ~MPLL_RESET_TIME_MASK);
}

void r600_engine_clock_entry_enable(struct radeon_device *rdev,
				    u32 index, bool enable)
{
	if (enable)
		WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
			 STEP_0_SPLL_ENTRY_VALID, ~STEP_0_SPLL_ENTRY_VALID);
	else
		WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
			 0, ~STEP_0_SPLL_ENTRY_VALID);
}

void r600_engine_clock_entry_enable_pulse_skipping(struct radeon_device *rdev,
						   u32 index, bool enable)
{
	if (enable)
		WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
			 STEP_0_SPLL_STEP_ENABLE, ~STEP_0_SPLL_STEP_ENABLE);
	else
		WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
			 0, ~STEP_0_SPLL_STEP_ENABLE);
}

void r600_engine_clock_entry_enable_post_divider(struct radeon_device *rdev,
						 u32 index, bool enable)
{
	if (enable)
		WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
			 STEP_0_POST_DIV_EN, ~STEP_0_POST_DIV_EN);
	else
		WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
			 0, ~STEP_0_POST_DIV_EN);
}

void r600_engine_clock_entry_set_post_divider(struct radeon_device *rdev,
					      u32 index, u32 divider)
{
	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
		 STEP_0_SPLL_POST_DIV(divider), ~STEP_0_SPLL_POST_DIV_MASK);
}

void r600_engine_clock_entry_set_reference_divider(struct radeon_device *rdev,
						   u32 index, u32 divider)
{
	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
		 STEP_0_SPLL_REF_DIV(divider), ~STEP_0_SPLL_REF_DIV_MASK);
}

void r600_engine_clock_entry_set_feedback_divider(struct radeon_device *rdev,
						  u32 index, u32 divider)
{
	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
		 STEP_0_SPLL_FB_DIV(divider), ~STEP_0_SPLL_FB_DIV_MASK);
}

void r600_engine_clock_entry_set_step_time(struct radeon_device *rdev,
					   u32 index, u32 step_time)
{
	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
		 STEP_0_SPLL_STEP_TIME(step_time), ~STEP_0_SPLL_STEP_TIME_MASK);
}

void r600_vid_rt_set_ssu(struct radeon_device *rdev, u32 u)
{
	WREG32_P(VID_RT, SSTU(u), ~SSTU_MASK);
}

void r600_vid_rt_set_vru(struct radeon_device *rdev, u32 u)
{
	WREG32_P(VID_RT, VID_CRTU(u), ~VID_CRTU_MASK);
}

void r600_vid_rt_set_vrt(struct radeon_device *rdev, u32 rt)
{
	WREG32_P(VID_RT, VID_CRT(rt), ~VID_CRT_MASK);
}

void r600_voltage_control_enable_pins(struct radeon_device *rdev,
				      u64 mask)
{
	WREG32(LOWER_GPIO_ENABLE, mask & 0xffffffff);
	WREG32(UPPER_GPIO_ENABLE, upper_32_bits(mask));
}

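/*
 * Program the voltage GPIO pins for one power level. The profile slot is
 * reverse-indexed (ix = 3 - (3 & index)), apparently because the hardware
 * orders the CTXSW profile slots opposite to enum r600_power_level. The
 * low 32 pin bits go to CTXSW_VID_LOWER_GPIO_CNTL; the remaining bits are
 * merged into the per-level 3-bit field of VID_UPPER_GPIO_CNTL.
 */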
void r600_voltage_control_program_voltages(struct radeon_device *rdev,
					   enum r600_power_level index, u64 pins)
{
	u32 tmp, mask;
	u32 ix = 3 - (3 & index);

	WREG32(CTXSW_VID_LOWER_GPIO_CNTL + (ix * 4), pins & 0xffffffff);

	mask = 7 << (3 * ix);
	tmp = RREG32(VID_UPPER_GPIO_CNTL);
	tmp = (tmp & ~mask) | ((pins >> (32 - (3 * ix))) & mask);
	WREG32(VID_UPPER_GPIO_CNTL, tmp);
}

void r600_voltage_control_deactivate_static_control(struct radeon_device *rdev,
						    u64 mask)
{
	u32 gpio;

	gpio = RREG32(GPIOPAD_MASK);
	gpio &= ~mask;
	WREG32(GPIOPAD_MASK, gpio);

	gpio = RREG32(GPIOPAD_EN);
	gpio &= ~mask;
	WREG32(GPIOPAD_EN, gpio);

	gpio = RREG32(GPIOPAD_A);
	gpio &= ~mask;
	WREG32(GPIOPAD_A, gpio);
}

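/*
 * Per-level CTXSW profile helpers. Each power level owns one
 * CTXSW_PROFILE_INDEX slot (reverse-indexed, as above) holding its
 * voltage, mclk and sclk configuration indices, the display watermark
 * selection and the PCIe gen2 voltage flag.
 */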
void r600_power_level_enable(struct radeon_device *rdev,
			     enum r600_power_level index, bool enable)
{
	u32 ix = 3 - (3 & index);

	if (enable)
		WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), CTXSW_FREQ_STATE_ENABLE,
			 ~CTXSW_FREQ_STATE_ENABLE);
	else
		WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), 0,
			 ~CTXSW_FREQ_STATE_ENABLE);
}

void r600_power_level_set_voltage_index(struct radeon_device *rdev,
					enum r600_power_level index, u32 voltage_index)
{
	u32 ix = 3 - (3 & index);

	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4),
		 CTXSW_FREQ_VIDS_CFG_INDEX(voltage_index), ~CTXSW_FREQ_VIDS_CFG_INDEX_MASK);
}

void r600_power_level_set_mem_clock_index(struct radeon_device *rdev,
					  enum r600_power_level index, u32 mem_clock_index)
{
	u32 ix = 3 - (3 & index);

	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4),
		 CTXSW_FREQ_MCLK_CFG_INDEX(mem_clock_index), ~CTXSW_FREQ_MCLK_CFG_INDEX_MASK);
}

void r600_power_level_set_eng_clock_index(struct radeon_device *rdev,
					  enum r600_power_level index, u32 eng_clock_index)
{
	u32 ix = 3 - (3 & index);

	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4),
		 CTXSW_FREQ_SCLK_CFG_INDEX(eng_clock_index), ~CTXSW_FREQ_SCLK_CFG_INDEX_MASK);
}

void r600_power_level_set_watermark_id(struct radeon_device *rdev,
				       enum r600_power_level index,
				       enum r600_display_watermark watermark_id)
{
	u32 ix = 3 - (3 & index);
	u32 tmp = 0;

	if (watermark_id == R600_DISPLAY_WATERMARK_HIGH)
		tmp = CTXSW_FREQ_DISPLAY_WATERMARK;
	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), tmp, ~CTXSW_FREQ_DISPLAY_WATERMARK);
}

void r600_power_level_set_pcie_gen2(struct radeon_device *rdev,
				    enum r600_power_level index, bool compatible)
{
	u32 ix = 3 - (3 & index);
	u32 tmp = 0;

	if (compatible)
		tmp = CTXSW_FREQ_GEN2PCIE_VOLT;
	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), tmp, ~CTXSW_FREQ_GEN2PCIE_VOLT);
}

enum r600_power_level r600_power_level_get_current_index(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK;
	tmp >>= CURRENT_PROFILE_INDEX_SHIFT;
	return tmp;
}

enum r600_power_level r600_power_level_get_target_index(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & TARGET_PROFILE_INDEX_MASK;
	tmp >>= TARGET_PROFILE_INDEX_SHIFT;
	return tmp;
}

void r600_power_level_set_enter_index(struct radeon_device *rdev,
				      enum r600_power_level index)
{
	WREG32_P(TARGET_AND_CURRENT_PROFILE_INDEX, DYN_PWR_ENTER_INDEX(index),
		 ~DYN_PWR_ENTER_INDEX_MASK);
}

void r600_wait_for_power_level_unequal(struct radeon_device *rdev,
				       enum r600_power_level index)
{
	int i;

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (r600_power_level_get_target_index(rdev) != index)
			break;
		udelay(1);
	}

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (r600_power_level_get_current_index(rdev) != index)
			break;
		udelay(1);
	}
}

void r600_wait_for_power_level(struct radeon_device *rdev,
			       enum r600_power_level index)
{
	int i;

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (r600_power_level_get_target_index(rdev) == index)
			break;
		udelay(1);
	}

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (r600_power_level_get_current_index(rdev) == index)
			break;
		udelay(1);
	}
}

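/*
 * Bring up dynamic PM: park sclk/mclk control, enable GLOBAL_PWRMGT,
 * wait for vblank on both CRTCs (presumably so the transition does not
 * disturb scanout), toggle the SPLL through bypass twice, waiting for
 * SPLL_CHG_STATUS each time, then hand clock control back.
 */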
void r600_start_dpm(struct radeon_device *rdev)
{
	r600_enable_sclk_control(rdev, false);
	r600_enable_mclk_control(rdev, false);

	r600_dynamicpm_enable(rdev, true);

	radeon_wait_for_vblank(rdev, 0);
	radeon_wait_for_vblank(rdev, 1);

	r600_enable_spll_bypass(rdev, true);
	r600_wait_for_spll_change(rdev);
	r600_enable_spll_bypass(rdev, false);
	r600_wait_for_spll_change(rdev);

	r600_enable_spll_bypass(rdev, true);
	r600_wait_for_spll_change(rdev);
	r600_enable_spll_bypass(rdev, false);
	r600_wait_for_spll_change(rdev);

	r600_enable_sclk_control(rdev, true);
	r600_enable_mclk_control(rdev, true);
}

void r600_stop_dpm(struct radeon_device *rdev)
{
	r600_dynamicpm_enable(rdev, false);
}

int r600_dpm_pre_set_power_state(struct radeon_device *rdev)
{
	return 0;
}

void r600_dpm_post_set_power_state(struct radeon_device *rdev)
{

}

bool r600_is_uvd_state(u32 class, u32 class2)
{
	if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
		return true;
	if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
		return true;
	return false;
}

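/*
 * Clamp the requested range to 0..255 C (temperatures here are in
 * millidegrees) and program the DIG_THERM interrupt thresholds and the
 * DPM trip point, which take whole degrees.
 */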
static int r600_set_thermal_temperature_range(struct radeon_device *rdev,
					      int min_temp, int max_temp)
{
	int low_temp = 0 * 1000;
	int high_temp = 255 * 1000;

	if (low_temp < min_temp)
		low_temp = min_temp;
	if (high_temp > max_temp)
		high_temp = max_temp;
	if (high_temp < low_temp) {
		DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
		return -EINVAL;
	}

	WREG32_P(CG_THERMAL_INT, DIG_THERM_INTH(high_temp / 1000), ~DIG_THERM_INTH_MASK);
	WREG32_P(CG_THERMAL_INT, DIG_THERM_INTL(low_temp / 1000), ~DIG_THERM_INTL_MASK);
	WREG32_P(CG_THERMAL_CTRL, DIG_THERM_DPM(high_temp / 1000), ~DIG_THERM_DPM_MASK);

	rdev->pm.dpm.thermal.min_temp = low_temp;
	rdev->pm.dpm.thermal.max_temp = high_temp;

	return 0;
}

bool r600_is_internal_thermal_sensor(enum radeon_int_thermal_type sensor)
{
	switch (sensor) {
	case THERMAL_TYPE_RV6XX:
	case THERMAL_TYPE_RV770:
	case THERMAL_TYPE_EVERGREEN:
	case THERMAL_TYPE_SUMO:
	case THERMAL_TYPE_NI:
	case THERMAL_TYPE_SI:
	case THERMAL_TYPE_CI:
	case THERMAL_TYPE_KV:
		return true;
	case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
	case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
		return false; /* need special handling */
	case THERMAL_TYPE_NONE:
	case THERMAL_TYPE_EXTERNAL:
	case THERMAL_TYPE_EXTERNAL_GPIO:
	default:
		return false;
	}
}

int r600_dpm_late_enable(struct radeon_device *rdev)
{
	int ret;

	if (rdev->irq.installed &&
	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
		ret = r600_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
		if (ret)
			return ret;
		rdev->irq.dpm_thermal = true;
		radeon_irq_set(rdev);
	}

	return 0;
}

union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
	struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
	struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
};

union fan_info {
	struct _ATOM_PPLIB_FANTABLE fan;
	struct _ATOM_PPLIB_FANTABLE2 fan2;
	struct _ATOM_PPLIB_FANTABLE3 fan3;
};

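/*
 * Copy an ATOM clock/voltage dependency table into driver format. The
 * ATOM records are packed, so the loop advances with explicit byte-wise
 * pointer arithmetic instead of array indexing. The kzalloc'd entries
 * array is freed by r600_free_extended_power_table().
 */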
static int r600_parse_clk_voltage_dep_table(struct radeon_clock_voltage_dependency_table *radeon_table,
					    ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table)
{
	u32 size = atom_table->ucNumEntries *
		sizeof(struct radeon_clock_voltage_dependency_entry);
	int i;
	ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;

	radeon_table->entries = kzalloc(size, GFP_KERNEL);
	if (!radeon_table->entries)
		return -ENOMEM;

	entry = &atom_table->entries[0];
	for (i = 0; i < atom_table->ucNumEntries; i++) {
		radeon_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
			(entry->ucClockHigh << 16);
		radeon_table->entries[i].v = le16_to_cpu(entry->usVoltage);
		entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
			((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record));
	}
	radeon_table->count = atom_table->ucNumEntries;

	return 0;
}

int r600_get_platform_caps(struct radeon_device *rdev)
{
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;

	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
	rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
	rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);

	return 0;
}

/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22

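/*
 * Parse the optional PPLib tables. Each block is guarded twice: by the
 * overall usTableSize (fan data needs PPLIB3, the dependency/limit
 * tables PPLIB4, CAC data PPLIB5) and, for the extended header tables
 * (VCE, UVD, SAMU, PPM, ACP, PowerTune), by usSize against the
 * SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_Vn cutoffs. An allocation failure
 * unwinds everything parsed so far via r600_free_extended_power_table().
 */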
int r600_parse_extended_power_table(struct radeon_device *rdev)
{
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	union power_info *power_info;
	union fan_info *fan_info;
	ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	int ret, i;

	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	/* fan table */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		if (power_info->pplib3.usFanTableOffset) {
			fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset +
						      le16_to_cpu(power_info->pplib3.usFanTableOffset));
			rdev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst;
			rdev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin);
			rdev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed);
			rdev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh);
			rdev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin);
			rdev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed);
			rdev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh);
			if (fan_info->fan.ucFanTableFormat >= 2)
				rdev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax);
			else
				rdev->pm.dpm.fan.t_max = 10900;
			rdev->pm.dpm.fan.cycle_delay = 100000;
			if (fan_info->fan.ucFanTableFormat >= 3) {
				rdev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode;
				rdev->pm.dpm.fan.default_max_fan_pwm =
					le16_to_cpu(fan_info->fan3.usFanPWMMax);
				rdev->pm.dpm.fan.default_fan_output_sensitivity = 4836;
				rdev->pm.dpm.fan.fan_output_sensitivity =
					le16_to_cpu(fan_info->fan3.usFanOutputSensitivity);
			}
			rdev->pm.dpm.fan.ucode_fan_control = true;
		}
	}

	/* clock dependency tables, shedding tables */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) {
		if (power_info->pplib4.usVddcDependencyOnSCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
			ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
							       dep_table);
			if (ret)
				return ret;
		}
		if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
			ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
							       dep_table);
			if (ret) {
				kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
				return ret;
			}
		}
		if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
			ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
							       dep_table);
			if (ret) {
				kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
				kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
				return ret;
			}
		}
		if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
			ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
							       dep_table);
			if (ret) {
				kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
				kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
				kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries);
				return ret;
			}
		}
		if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
			ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
				(ATOM_PPLIB_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset));
			if (clk_v->ucNumEntries) {
				rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk =
					le16_to_cpu(clk_v->entries[0].usSclkLow) |
					(clk_v->entries[0].ucSclkHigh << 16);
				rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk =
					le16_to_cpu(clk_v->entries[0].usMclkLow) |
					(clk_v->entries[0].ucMclkHigh << 16);
				rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc =
					le16_to_cpu(clk_v->entries[0].usVddc);
				rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci =
					le16_to_cpu(clk_v->entries[0].usVddci);
			}
		}
		if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) {
			ATOM_PPLIB_PhaseSheddingLimits_Table *psl =
				(ATOM_PPLIB_PhaseSheddingLimits_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));
			ATOM_PPLIB_PhaseSheddingLimits_Record *entry;

			rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
				kzalloc(psl->ucNumEntries *
					sizeof(struct radeon_phase_shedding_limits_entry),
					GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}

			entry = &psl->entries[0];
			for (i = 0; i < psl->ucNumEntries; i++) {
				rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
					le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16);
				rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
					le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16);
				rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record));
			}
			rdev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
				psl->ucNumEntries;
		}
	}

	/* cac data */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) {
		rdev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit);
		rdev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit);
		rdev->pm.dpm.near_tdp_limit_adjusted = rdev->pm.dpm.near_tdp_limit;
		rdev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit);
		if (rdev->pm.dpm.tdp_od_limit)
			rdev->pm.dpm.power_control = true;
		else
			rdev->pm.dpm.power_control = false;
		rdev->pm.dpm.tdp_adjustment = 0;
		rdev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold);
		rdev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage);
		rdev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope);
		if (power_info->pplib5.usCACLeakageTableOffset) {
			ATOM_PPLIB_CAC_Leakage_Table *cac_table =
				(ATOM_PPLIB_CAC_Leakage_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
			ATOM_PPLIB_CAC_Leakage_Record *entry;
			u32 size = cac_table->ucNumEntries * sizeof(struct radeon_cac_leakage_table);
			rdev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.cac_leakage_table.entries) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			entry = &cac_table->entries[0];
			for (i = 0; i < cac_table->ucNumEntries; i++) {
				if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 =
						le16_to_cpu(entry->usVddc1);
					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 =
						le16_to_cpu(entry->usVddc2);
					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 =
						le16_to_cpu(entry->usVddc3);
				} else {
					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
						le16_to_cpu(entry->usVddc);
					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
						le32_to_cpu(entry->ulLeakageValue);
				}
				entry = (ATOM_PPLIB_CAC_Leakage_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record));
			}
			rdev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
		}
	}

	/* ext tables */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
			(mode_info->atom_context->bios + data_offset +
			 le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) &&
			ext_hdr->usVCETableOffset) {
			VCEClockInfoArray *array = (VCEClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
				 1 + array->ucNumEntries * sizeof(VCEClockInfo));
			ATOM_PPLIB_VCE_State_Table *states =
				(ATOM_PPLIB_VCE_State_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
				 1 + (array->ucNumEntries * sizeof (VCEClockInfo)) +
				 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)));
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry;
			ATOM_PPLIB_VCE_State_Record *state_entry;
			VCEClockInfo *vce_clk;
			u32 size = limits->numEntries *
				sizeof(struct radeon_vce_clock_voltage_dependency_entry);
			rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			state_entry = &states->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				vce_clk = (VCEClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
				rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
				rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk =
					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
				rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
			}
			for (i = 0; i < states->numEntries; i++) {
				if (i >= RADEON_MAX_VCE_LEVELS)
					break;
				vce_clk = (VCEClockInfo *)
					((u8 *)&array->entries[0] +
					 (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
				rdev->pm.dpm.vce_states[i].evclk =
					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
				rdev->pm.dpm.vce_states[i].ecclk =
					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
				rdev->pm.dpm.vce_states[i].clk_idx =
					state_entry->ucClockInfoIndex & 0x3f;
				rdev->pm.dpm.vce_states[i].pstate =
					(state_entry->ucClockInfoIndex & 0xc0) >> 6;
				state_entry = (ATOM_PPLIB_VCE_State_Record *)
					((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) &&
			ext_hdr->usUVDTableOffset) {
			UVDClockInfoArray *array = (UVDClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1);
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 +
				 1 + (array->ucNumEntries * sizeof (UVDClockInfo)));
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct radeon_uvd_clock_voltage_dependency_entry);
			rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				UVDClockInfo *uvd_clk = (UVDClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo)));
				rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk =
					le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16);
				rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
					le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
				rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) &&
			ext_hdr->usSAMUTableOffset) {
			ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_SAMClk_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1);
			ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct radeon_clock_voltage_dependency_entry);
			rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16);
				rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
		    ext_hdr->usPPMTableOffset) {
			ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usPPMTableOffset));
			rdev->pm.dpm.dyn_state.ppm_table =
				kzalloc(sizeof(struct radeon_ppm_table), GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.ppm_table) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			rdev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
			rdev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
				le16_to_cpu(ppm->usCpuCoreNumber);
			rdev->pm.dpm.dyn_state.ppm_table->platform_tdp =
				le32_to_cpu(ppm->ulPlatformTDP);
			rdev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp =
				le32_to_cpu(ppm->ulSmallACPlatformTDP);
			rdev->pm.dpm.dyn_state.ppm_table->platform_tdc =
				le32_to_cpu(ppm->ulPlatformTDC);
			rdev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc =
				le32_to_cpu(ppm->ulSmallACPlatformTDC);
			rdev->pm.dpm.dyn_state.ppm_table->apu_tdp =
				le32_to_cpu(ppm->ulApuTDP);
			rdev->pm.dpm.dyn_state.ppm_table->dgpu_tdp =
				le32_to_cpu(ppm->ulDGpuTDP);
			rdev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power =
				le32_to_cpu(ppm->ulDGpuUlvPower);
			rdev->pm.dpm.dyn_state.ppm_table->tj_max =
				le32_to_cpu(ppm->ulTjmax);
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) &&
			ext_hdr->usACPTableOffset) {
			ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_ACPClk_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usACPTableOffset) + 1);
			ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct radeon_clock_voltage_dependency_entry);
			rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16);
				rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) &&
			ext_hdr->usPowerTuneTableOffset) {
			u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
			ATOM_PowerTune_Table *pt;
			rdev->pm.dpm.dyn_state.cac_tdp_table =
				kzalloc(sizeof(struct radeon_cac_tdp_table), GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.cac_tdp_table) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			if (rev > 0) {
				ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				rdev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
					le16_to_cpu(ppt->usMaximumPowerDeliveryLimit);
				pt = &ppt->power_tune_table;
			} else {
				ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				rdev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255;
				pt = &ppt->power_tune_table;
			}
			rdev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP);
			rdev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp =
				le16_to_cpu(pt->usConfigurableTDP);
			rdev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC);
			rdev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit =
				le16_to_cpu(pt->usBatteryPowerLimit);
			rdev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit =
				le16_to_cpu(pt->usSmallPowerLimit);
			rdev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage =
				le16_to_cpu(pt->usLowCACLeakage);
			rdev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage =
				le16_to_cpu(pt->usHighCACLeakage);
		}
	}

	return 0;
}

void r600_free_extended_power_table(struct radeon_device *rdev)
{
	struct radeon_dpm_dynamic_state *dyn_state = &rdev->pm.dpm.dyn_state;

	kfree(dyn_state->vddc_dependency_on_sclk.entries);
	kfree(dyn_state->vddci_dependency_on_mclk.entries);
	kfree(dyn_state->vddc_dependency_on_mclk.entries);
	kfree(dyn_state->mvdd_dependency_on_mclk.entries);
	kfree(dyn_state->cac_leakage_table.entries);
	kfree(dyn_state->phase_shedding_limits_table.entries);
	kfree(dyn_state->ppm_table);
	kfree(dyn_state->cac_tdp_table);
	kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
	kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
	kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
	kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
}

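/*
 * Pick a PCIe gen: an explicit asic_gen wins outright; otherwise choose
 * the fastest speed that both sys_mask and default_gen allow. (The
 * return after the switch is unreachable; every case returns.)
 */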
enum radeon_pcie_gen r600_get_pcie_gen_support(struct radeon_device *rdev,
					       u32 sys_mask,
					       enum radeon_pcie_gen asic_gen,
					       enum radeon_pcie_gen default_gen)
{
	switch (asic_gen) {
	case RADEON_PCIE_GEN1:
		return RADEON_PCIE_GEN1;
	case RADEON_PCIE_GEN2:
		return RADEON_PCIE_GEN2;
	case RADEON_PCIE_GEN3:
		return RADEON_PCIE_GEN3;
	default:
		if ((sys_mask & DRM_PCIE_SPEED_80) && (default_gen == RADEON_PCIE_GEN3))
			return RADEON_PCIE_GEN3;
		else if ((sys_mask & DRM_PCIE_SPEED_50) && (default_gen == RADEON_PCIE_GEN2))
			return RADEON_PCIE_GEN2;
		else
			return RADEON_PCIE_GEN1;
	}
	return RADEON_PCIE_GEN1;
}

u16 r600_get_pcie_lane_support(struct radeon_device *rdev,
			       u16 asic_lanes,
			       u16 default_lanes)
{
	switch (asic_lanes) {
	case 0:
	default:
		return default_lanes;
	case 1:
		return 1;
	case 2:
		return 2;
	case 4:
		return 4;
	case 8:
		return 8;
	case 12:
		return 12;
	case 16:
		return 16;
	}
}

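/*
 * Encode a PCIe lane count into the 3-bit hardware field: 1, 2, 4, 8,
 * 12 and 16 lanes map to codes 1-6 respectively; anything else
 * (including counts above 16) encodes as 0.
 */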
u8 r600_encode_pci_lane_width(u32 lanes)
{
	u8 encoded_lanes[] = { 0, 1, 2, 0, 3, 0, 0, 0, 4, 0, 0, 0, 5, 0, 0, 0, 6 };

	if (lanes > 16)
		return 0;

	return encoded_lanes[lanes];
}