Subversion Repositories Kolibri OS

Rev

Rev 5078 | Rev 5346 | Go to most recent revision | Details | Compare with Previous | Last modification | View Log | RSS feed

Rev Author Line No. Line
1268 serge 1
/*
2
 * Permission is hereby granted, free of charge, to any person obtaining a
3
 * copy of this software and associated documentation files (the "Software"),
4
 * to deal in the Software without restriction, including without limitation
5
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
6
 * and/or sell copies of the Software, and to permit persons to whom the
7
 * Software is furnished to do so, subject to the following conditions:
8
 *
9
 * The above copyright notice and this permission notice shall be included in
10
 * all copies or substantial portions of the Software.
11
 *
12
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
13
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
15
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
16
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
17
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
18
 * OTHER DEALINGS IN THE SOFTWARE.
19
 *
20
 * Authors: Rafał Miłecki 
1430 serge 21
 *          Alex Deucher 
1268 serge 22
 */
#include <drm/drmP.h>
1268 serge 24
#include "radeon.h"
1430 serge 25
#include "avivod.h"
1986 serge 26
#include "atom.h"
1268 serge 27
 
1963 serge 28
 
1430 serge 29
#define RADEON_IDLE_LOOP_MS 100
30
#define RADEON_RECLOCK_DELAY_MS 200
31
#define RADEON_WAIT_VBLANK_TIMEOUT 200
1268 serge 32
 
/* Human-readable names for the legacy (non-DPM) power state types,
 * indexed by the first five values of enum radeon_pm_state_type.
 * Index 0 is the "default" type and intentionally prints as "". */
static const char *radeon_pm_state_type_name[5] = {
	"",
	"Powersave",
	"Battery",
	"Balanced",
	"Performance",
};
40
 
1963 serge 41
static void radeon_dynpm_idle_work_handler(struct work_struct *work);
42
static int radeon_debugfs_pm_init(struct radeon_device *rdev);
43
static bool radeon_pm_in_vbl(struct radeon_device *rdev);
44
static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish);
45
static void radeon_pm_update_profile(struct radeon_device *rdev);
46
static void radeon_pm_set_clocks(struct radeon_device *rdev);
47
 
2997 Serge 48
int radeon_pm_get_type_index(struct radeon_device *rdev,
49
			     enum radeon_pm_state_type ps_type,
50
			     int instance)
51
{
52
	int i;
53
	int found_instance = -1;
1963 serge 54
 
2997 Serge 55
	for (i = 0; i < rdev->pm.num_power_states; i++) {
56
		if (rdev->pm.power_state[i].type == ps_type) {
57
			found_instance++;
58
			if (found_instance == instance)
59
				return i;
60
		}
61
	}
62
	/* return default if no match */
63
	return rdev->pm.default_power_state_index;
64
}
1963 serge 65
 
/* React to an ACPI event (AC plug/unplug).
 *
 * In DPM mode, refresh the cached ac_power flag and, on ARUBA parts
 * with a BAPM hook, re-apply BAPM for the new power source.  In
 * profile mode with the AUTO profile selected, re-evaluate the
 * profile and reprogram the clocks.  All state changes are made
 * under pm.mutex. */
void radeon_pm_acpi_event_handler(struct radeon_device *rdev)
{
	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
		mutex_lock(&rdev->pm.mutex);
		if (power_supply_is_system_supplied() > 0)
			rdev->pm.dpm.ac_power = true;
		else
			rdev->pm.dpm.ac_power = false;
		if (rdev->family == CHIP_ARUBA) {
			if (rdev->asic->dpm.enable_bapm)
				radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power);
		}
		mutex_unlock(&rdev->pm.mutex);
	} else if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
		if (rdev->pm.profile == PM_PROFILE_AUTO) {
			mutex_lock(&rdev->pm.mutex);
			radeon_pm_update_profile(rdev);
			radeon_pm_set_clocks(rdev);
			mutex_unlock(&rdev->pm.mutex);
		}
	}
}
88
 
1963 serge 89
static void radeon_pm_update_profile(struct radeon_device *rdev)
1430 serge 90
{
1963 serge 91
	switch (rdev->pm.profile) {
92
	case PM_PROFILE_DEFAULT:
93
		rdev->pm.profile_index = PM_PROFILE_DEFAULT_IDX;
1430 serge 94
		break;
1963 serge 95
	case PM_PROFILE_AUTO:
96
		if (power_supply_is_system_supplied() > 0) {
97
			if (rdev->pm.active_crtc_count > 1)
98
				rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
99
			else
100
				rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
1430 serge 101
		} else {
1963 serge 102
			if (rdev->pm.active_crtc_count > 1)
103
				rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
104
			else
105
				rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
1430 serge 106
		}
107
		break;
1963 serge 108
	case PM_PROFILE_LOW:
109
		if (rdev->pm.active_crtc_count > 1)
110
			rdev->pm.profile_index = PM_PROFILE_LOW_MH_IDX;
111
		else
112
			rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX;
1430 serge 113
		break;
1963 serge 114
	case PM_PROFILE_MID:
115
		if (rdev->pm.active_crtc_count > 1)
116
			rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
117
		else
118
			rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
119
		break;
120
	case PM_PROFILE_HIGH:
121
		if (rdev->pm.active_crtc_count > 1)
122
			rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
123
		else
124
			rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
125
		break;
1430 serge 126
	}
127
 
1963 serge 128
	if (rdev->pm.active_crtc_count == 0) {
129
		rdev->pm.requested_power_state_index =
130
			rdev->pm.profiles[rdev->pm.profile_index].dpms_off_ps_idx;
131
		rdev->pm.requested_clock_mode_index =
132
			rdev->pm.profiles[rdev->pm.profile_index].dpms_off_cm_idx;
133
	} else {
134
		rdev->pm.requested_power_state_index =
135
			rdev->pm.profiles[rdev->pm.profile_index].dpms_on_ps_idx;
136
		rdev->pm.requested_clock_mode_index =
137
			rdev->pm.profiles[rdev->pm.profile_index].dpms_on_cm_idx;
1430 serge 138
		}
1963 serge 139
}
140
 
141
static void radeon_unmap_vram_bos(struct radeon_device *rdev)
142
{
143
	struct radeon_bo *bo, *n;
144
 
145
	if (list_empty(&rdev->gem.objects))
146
		return;
147
 
5078 serge 148
	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
149
		if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
150
			ttm_bo_unmap_virtual(&bo->tbo);
151
	}
1963 serge 152
}
153
 
/* Synchronize a reclock with the vertical blanking period.
 * NOTE(review): the actual wait_event_timeout is commented out in this
 * port, so only the vblank_sync flag is cleared here — the function
 * does not really block; confirm whether that is intentional. */
static void radeon_sync_with_vblank(struct radeon_device *rdev)
{
	if (rdev->pm.active_crtcs) {
		rdev->pm.vblank_sync = false;
//       wait_event_timeout(
//           rdev->irq.vblank_queue, rdev->pm.vblank_sync,
//           msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT));
    }
}
1963 serge 163
 
/* Program the requested engine/memory clocks (legacy PM path).
 *
 * Clamps sclk/mclk to the defaults, orders the voltage change relative
 * to the clock change (raise voltage before raising clocks, lower it
 * after lowering them), and updates the current_* bookkeeping on
 * success.  Bails out early when nothing changed or when the GUI block
 * is not idle.  Caller is expected to hold the PM locks taken by
 * radeon_pm_set_clocks(). */
static void radeon_set_power_state(struct radeon_device *rdev)
{
	u32 sclk, mclk;
	bool misc_after = false;

	/* nothing to do if the requested state is already programmed */
	if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
	    (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
		return;

	if (radeon_gui_idle(rdev)) {
		sclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
			clock_info[rdev->pm.requested_clock_mode_index].sclk;
		/* never clock above the default engine clock */
		if (sclk > rdev->pm.default_sclk)
			sclk = rdev->pm.default_sclk;

		/* starting with BTC, there is one state that is used for both
		 * MH and SH.  Difference is that we always use the high clock index for
		 * mclk and vddci.
		 */
		if ((rdev->pm.pm_method == PM_METHOD_PROFILE) &&
		    (rdev->family >= CHIP_BARTS) &&
		    rdev->pm.active_crtc_count &&
		    ((rdev->pm.profile_index == PM_PROFILE_MID_MH_IDX) ||
		     (rdev->pm.profile_index == PM_PROFILE_LOW_MH_IDX)))
			mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
				clock_info[rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx].mclk;
		else
			mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
				clock_info[rdev->pm.requested_clock_mode_index].mclk;

		/* never clock above the default memory clock */
		if (mclk > rdev->pm.default_mclk)
			mclk = rdev->pm.default_mclk;

		/* upvolt before raising clocks, downvolt after lowering clocks */
		if (sclk < rdev->pm.current_sclk)
			misc_after = true;

		radeon_sync_with_vblank(rdev);

		/* dynpm reclocks only inside the vertical blanking period */
		if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
			if (!radeon_pm_in_vbl(rdev))
				return;
		}

		radeon_pm_prepare(rdev);

		if (!misc_after)
			/* voltage, pcie lanes, etc.*/
			radeon_pm_misc(rdev);

		/* set engine clock */
		if (sclk != rdev->pm.current_sclk) {
			radeon_pm_debug_check_in_vbl(rdev, false);
			radeon_set_engine_clock(rdev, sclk);
			radeon_pm_debug_check_in_vbl(rdev, true);
			rdev->pm.current_sclk = sclk;
			DRM_DEBUG_DRIVER("Setting: e: %d\n", sclk);
		}

		/* set memory clock */
		if (rdev->asic->pm.set_memory_clock && (mclk != rdev->pm.current_mclk)) {
			radeon_pm_debug_check_in_vbl(rdev, false);
			radeon_set_memory_clock(rdev, mclk);
			radeon_pm_debug_check_in_vbl(rdev, true);
			rdev->pm.current_mclk = mclk;
			DRM_DEBUG_DRIVER("Setting: m: %d\n", mclk);
		}

		if (misc_after)
			/* voltage, pcie lanes, etc.*/
			radeon_pm_misc(rdev);

		radeon_pm_finish(rdev);

		rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index;
		rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index;
	} else
		DRM_DEBUG_DRIVER("pm: GUI not idle!!!\n");
}
243
 
/* Quiesce the GPU and apply the requested power state (legacy PM).
 *
 * Lock order: ddev->struct_mutex, then (mclk_lock — disabled in this
 * port), then ring_lock.  All rings are drained, VRAM BO mappings are
 * dropped, and vblank references are tracked in pm.req_vblank (the
 * drm_vblank_get/put calls themselves are commented out here) before
 * radeon_set_power_state() touches the clocks.  On a fence-wait
 * failure the function unlocks and returns without reclocking — the
 * GPU needs a reset which must not be done from here. */
static void radeon_pm_set_clocks(struct radeon_device *rdev)
{
	int i, r;

	/* no need to take locks, etc. if nothing's going to change */
	if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
	    (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
		return;

	mutex_lock(&rdev->ddev->struct_mutex);
//   down_write(&rdev->pm.mclk_lock);
	mutex_lock(&rdev->ring_lock);

	/* wait for the rings to drain */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		struct radeon_ring *ring = &rdev->ring[i];
		if (!ring->ready) {
			continue;
		}
		r = radeon_fence_wait_empty(rdev, i);
		if (r) {
			/* needs a GPU reset dont reset here */
			mutex_unlock(&rdev->ring_lock);
//			up_write(&rdev->pm.mclk_lock);
			mutex_unlock(&rdev->ddev->struct_mutex);
			return;
		}
	}

	radeon_unmap_vram_bos(rdev);

	/* hold a vblank reference on every active crtc while reclocking */
	if (rdev->irq.installed) {
		for (i = 0; i < rdev->num_crtc; i++) {
			if (rdev->pm.active_crtcs & (1 << i)) {
				rdev->pm.req_vblank |= (1 << i);
//               drm_vblank_get(rdev->ddev, i);
			}
		}
	}

	radeon_set_power_state(rdev);

	/* release the vblank references taken above */
	if (rdev->irq.installed) {
		for (i = 0; i < rdev->num_crtc; i++) {
			if (rdev->pm.req_vblank & (1 << i)) {
				rdev->pm.req_vblank &= ~(1 << i);
//               drm_vblank_put(rdev->ddev, i);
			}
		}
	}

	/* update display watermarks based on new power state */
	radeon_update_bandwidth_info(rdev);
	if (rdev->pm.active_crtc_count)
		radeon_bandwidth_update(rdev);

	rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;

	mutex_unlock(&rdev->ring_lock);
//   up_write(&rdev->pm.mclk_lock);
	mutex_unlock(&rdev->ddev->struct_mutex);
}
306
 
/* Dump every legacy power state and its clock modes to the DRM debug
 * log.  IGP parts have no discrete memory, so only the engine clock is
 * printed for them.  Clocks are stored in 10 kHz units, hence the *10
 * to print kHz. */
static void radeon_pm_print_states(struct radeon_device *rdev)
{
	int i, j;
	struct radeon_power_state *power_state;
	struct radeon_pm_clock_info *clock_info;

	DRM_DEBUG_DRIVER("%d Power State(s)\n", rdev->pm.num_power_states);
	for (i = 0; i < rdev->pm.num_power_states; i++) {
		power_state = &rdev->pm.power_state[i];
		DRM_DEBUG_DRIVER("State %d: %s\n", i,
			radeon_pm_state_type_name[power_state->type]);
		if (i == rdev->pm.default_power_state_index)
			DRM_DEBUG_DRIVER("\tDefault");
		if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP))
			DRM_DEBUG_DRIVER("\t%d PCIE Lanes\n", power_state->pcie_lanes);
		if (power_state->flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
			DRM_DEBUG_DRIVER("\tSingle display only\n");
		DRM_DEBUG_DRIVER("\t%d Clock Mode(s)\n", power_state->num_clock_modes);
		for (j = 0; j < power_state->num_clock_modes; j++) {
			clock_info = &(power_state->clock_info[j]);
			if (rdev->flags & RADEON_IS_IGP)
				DRM_DEBUG_DRIVER("\t\t%d e: %d\n",
						 j,
						 clock_info->sclk * 10);
			else
				DRM_DEBUG_DRIVER("\t\t%d e: %d\tm: %d\tv: %d\n",
						 j,
						 clock_info->sclk * 10,
						 clock_info->mclk * 10,
						 clock_info->voltage.voltage);
		}
	}
}
1430 serge 340
 
1963 serge 341
static ssize_t radeon_get_pm_profile(struct device *dev,
342
				     struct device_attribute *attr,
343
				     char *buf)
344
{
5078 serge 345
	struct drm_device *ddev = dev_get_drvdata(dev);
2997 Serge 346
	struct radeon_device *rdev = ddev->dev_private;
347
	int cp = rdev->pm.profile;
1963 serge 348
 
2997 Serge 349
	return snprintf(buf, PAGE_SIZE, "%s\n",
350
			(cp == PM_PROFILE_AUTO) ? "auto" :
351
			(cp == PM_PROFILE_LOW) ? "low" :
352
			(cp == PM_PROFILE_MID) ? "mid" :
353
			(cp == PM_PROFILE_HIGH) ? "high" : "default");
1430 serge 354
}
355
 
1963 serge 356
static ssize_t radeon_set_pm_profile(struct device *dev,
357
				     struct device_attribute *attr,
358
				     const char *buf,
359
				     size_t count)
1430 serge 360
{
5078 serge 361
	struct drm_device *ddev = dev_get_drvdata(dev);
1963 serge 362
	struct radeon_device *rdev = ddev->dev_private;
363
 
5078 serge 364
	/* Can't set profile when the card is off */
365
	if  ((rdev->flags & RADEON_IS_PX) &&
366
	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
367
		return -EINVAL;
368
 
1963 serge 369
	mutex_lock(&rdev->pm.mutex);
2997 Serge 370
	if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
371
		if (strncmp("default", buf, strlen("default")) == 0)
1963 serge 372
    rdev->pm.profile = PM_PROFILE_DEFAULT;
2997 Serge 373
		else if (strncmp("auto", buf, strlen("auto")) == 0)
374
			rdev->pm.profile = PM_PROFILE_AUTO;
375
		else if (strncmp("low", buf, strlen("low")) == 0)
376
			rdev->pm.profile = PM_PROFILE_LOW;
377
		else if (strncmp("mid", buf, strlen("mid")) == 0)
378
			rdev->pm.profile = PM_PROFILE_MID;
379
		else if (strncmp("high", buf, strlen("high")) == 0)
380
			rdev->pm.profile = PM_PROFILE_HIGH;
381
		else {
382
			count = -EINVAL;
383
			goto fail;
384
		}
1963 serge 385
    radeon_pm_update_profile(rdev);
386
    radeon_pm_set_clocks(rdev);
2997 Serge 387
	} else
388
		count = -EINVAL;
389
 
1963 serge 390
fail:
391
	mutex_unlock(&rdev->pm.mutex);
392
 
393
	return count;
394
}
395
 
396
static ssize_t radeon_get_pm_method(struct device *dev,
397
				    struct device_attribute *attr,
398
				    char *buf)
399
{
5078 serge 400
	struct drm_device *ddev = dev_get_drvdata(dev);
1963 serge 401
	struct radeon_device *rdev = ddev->dev_private;
402
	int pm = rdev->pm.pm_method;
403
 
404
	return snprintf(buf, PAGE_SIZE, "%s\n",
5078 serge 405
			(pm == PM_METHOD_DYNPM) ? "dynpm" :
406
			(pm == PM_METHOD_PROFILE) ? "profile" : "dpm");
1963 serge 407
}
408
 
/* sysfs store handler: switch the legacy PM method between "dynpm"
 * and "profile".  Switching away from DPM is not supported, nor is
 * any change while a PX card is powered down.  After a successful
 * switch the clocks are recomputed for the new method.
 * NOTE(review): the cancel_delayed_work_sync for the dynpm idle work
 * is commented out in this port — confirm the idle worker cannot run
 * after the switch to profile mode. */
static ssize_t radeon_set_pm_method(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct radeon_device *rdev = ddev->dev_private;

	/* Can't set method when the card is off */
	if  ((rdev->flags & RADEON_IS_PX) &&
	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
		count = -EINVAL;
		goto fail;
	}

	/* we don't support the legacy modes with dpm */
	if (rdev->pm.pm_method == PM_METHOD_DPM) {
		count = -EINVAL;
		goto fail;
	}

	if (strncmp("dynpm", buf, strlen("dynpm")) == 0) {
		mutex_lock(&rdev->pm.mutex);
		rdev->pm.pm_method = PM_METHOD_DYNPM;
		/* start paused; the idle worker unpauses when appropriate */
		rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
		rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
		mutex_unlock(&rdev->pm.mutex);
	} else if (strncmp("profile", buf, strlen("profile")) == 0) {
		mutex_lock(&rdev->pm.mutex);
		/* disable dynpm */
		rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
		rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
		rdev->pm.pm_method = PM_METHOD_PROFILE;
		mutex_unlock(&rdev->pm.mutex);
//		cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
	} else {
		count = -EINVAL;
		goto fail;
	}
	radeon_pm_compute_clocks(rdev);
fail:
	return count;
}
452
 
5078 serge 453
static ssize_t radeon_get_dpm_state(struct device *dev,
454
				    struct device_attribute *attr,
455
				    char *buf)
456
{
457
	struct drm_device *ddev = dev_get_drvdata(dev);
458
	struct radeon_device *rdev = ddev->dev_private;
459
	enum radeon_pm_state_type pm = rdev->pm.dpm.user_state;
2997 Serge 460
 
5078 serge 461
	return snprintf(buf, PAGE_SIZE, "%s\n",
462
			(pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
463
			(pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
464
}
465
 
466
static ssize_t radeon_set_dpm_state(struct device *dev,
467
				    struct device_attribute *attr,
468
				    const char *buf,
469
				    size_t count)
470
{
471
	struct drm_device *ddev = dev_get_drvdata(dev);
472
	struct radeon_device *rdev = ddev->dev_private;
473
 
474
	mutex_lock(&rdev->pm.mutex);
475
	if (strncmp("battery", buf, strlen("battery")) == 0)
476
		rdev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY;
477
	else if (strncmp("balanced", buf, strlen("balanced")) == 0)
478
		rdev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
479
	else if (strncmp("performance", buf, strlen("performance")) == 0)
480
		rdev->pm.dpm.user_state = POWER_STATE_TYPE_PERFORMANCE;
481
	else {
482
		mutex_unlock(&rdev->pm.mutex);
483
		count = -EINVAL;
484
		goto fail;
485
	}
486
	mutex_unlock(&rdev->pm.mutex);
487
 
488
	/* Can't set dpm state when the card is off */
489
	if (!(rdev->flags & RADEON_IS_PX) ||
490
	    (ddev->switch_power_state == DRM_SWITCH_POWER_ON))
491
	radeon_pm_compute_clocks(rdev);
492
 
493
fail:
494
	return count;
495
}
496
 
497
static ssize_t radeon_get_dpm_forced_performance_level(struct device *dev,
498
						       struct device_attribute *attr,
499
						       char *buf)
500
{
501
	struct drm_device *ddev = dev_get_drvdata(dev);
502
	struct radeon_device *rdev = ddev->dev_private;
503
	enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
504
 
505
	if  ((rdev->flags & RADEON_IS_PX) &&
506
	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
507
		return snprintf(buf, PAGE_SIZE, "off\n");
508
 
509
	return snprintf(buf, PAGE_SIZE, "%s\n",
510
			(level == RADEON_DPM_FORCED_LEVEL_AUTO) ? "auto" :
511
			(level == RADEON_DPM_FORCED_LEVEL_LOW) ? "low" : "high");
512
}
513
 
/* sysfs store handler: force the DPM performance level to "low",
 * "high" or "auto".  Rejected while a PX card is powered down, while
 * the thermal state is active, or when the asic callback fails.  All
 * paths funnel through the single unlock at 'fail'. */
static ssize_t radeon_set_dpm_forced_performance_level(struct device *dev,
						       struct device_attribute *attr,
						       const char *buf,
						       size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct radeon_device *rdev = ddev->dev_private;
	enum radeon_dpm_forced_level level;
	int ret = 0;

	/* Can't force performance level when the card is off */
	if  ((rdev->flags & RADEON_IS_PX) &&
	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return -EINVAL;

	mutex_lock(&rdev->pm.mutex);
	if (strncmp("low", buf, strlen("low")) == 0) {
		level = RADEON_DPM_FORCED_LEVEL_LOW;
	} else if (strncmp("high", buf, strlen("high")) == 0) {
		level = RADEON_DPM_FORCED_LEVEL_HIGH;
	} else if (strncmp("auto", buf, strlen("auto")) == 0) {
		level = RADEON_DPM_FORCED_LEVEL_AUTO;
	} else {
		count = -EINVAL;
		goto fail;
	}
	if (rdev->asic->dpm.force_performance_level) {
		/* thermal throttling owns the level while it is active */
		if (rdev->pm.dpm.thermal_active) {
			count = -EINVAL;
			goto fail;
		}
		ret = radeon_dpm_force_performance_level(rdev, level);
		if (ret)
			count = -EINVAL;
	}
fail:
	mutex_unlock(&rdev->pm.mutex);

	return count;
}
554
 
555
 
1963 serge 556
static ssize_t radeon_hwmon_show_temp(struct device *dev,
557
				      struct device_attribute *attr,
558
				      char *buf)
559
{
5078 serge 560
	struct radeon_device *rdev = dev_get_drvdata(dev);
561
	struct drm_device *ddev = rdev->ddev;
2997 Serge 562
	int temp;
1963 serge 563
 
5078 serge 564
	/* Can't get temperature when the card is off */
565
	if  ((rdev->flags & RADEON_IS_PX) &&
566
	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
567
		return -EINVAL;
568
 
569
	if (rdev->asic->pm.get_temperature)
570
		temp = radeon_get_temperature(rdev);
571
	else
572
		temp = 0;
573
 
574
	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
575
}
576
 
/* hwmon show handler for the thermal threshold.  The hyst (min_temp)
 * branch is commented out in this port, so the handler always reports
 * the DPM thermal max_temp. */
static ssize_t radeon_hwmon_show_temp_thresh(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	struct radeon_device *rdev = dev_get_drvdata(dev);
//	int hyst = to_sensor_dev_attr(attr)->index;
	int temp;

//	if (hyst)
//		temp = rdev->pm.dpm.thermal.min_temp;
//	else
		temp = rdev->pm.dpm.thermal.max_temp;

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}
592
 
593
 
/* hwmon sysfs attribute table.  The sensor entries are disabled in
 * this port (hwmon is not wired up), leaving only the required NULL
 * terminator. */
static struct attribute *hwmon_attributes[] = {
//	&sensor_dev_attr_temp1_input.dev_attr.attr,
//	&sensor_dev_attr_temp1_crit.dev_attr.attr,
//	&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
	NULL
};
600
 
601
 
602
 
/* Set up hwmon for the internal thermal sensor.  In this port the
 * hwmon device registration is stripped out, so only the sensor-type
 * check remains and the function always returns 0. */
static int radeon_hwmon_init(struct radeon_device *rdev)
{
	int err = 0;

	switch (rdev->pm.int_thermal_type) {
	case THERMAL_TYPE_RV6XX:
	case THERMAL_TYPE_RV770:
	case THERMAL_TYPE_EVERGREEN:
	case THERMAL_TYPE_NI:
	case THERMAL_TYPE_SUMO:
	case THERMAL_TYPE_SI:
	case THERMAL_TYPE_CI:
	case THERMAL_TYPE_KV:
		/* nothing to do without a temperature callback */
		if (rdev->asic->pm.get_temperature == NULL)
			return err;

		break;
	default:
		break;
	}

	return err;
}
626
 
/* Tear down the hwmon device.  Unregistration is disabled in this
 * port, so this is a no-op kept for symmetry with radeon_hwmon_init(). */
static void radeon_hwmon_fini(struct radeon_device *rdev)
{
//   if (rdev->pm.int_hwmon_dev)
//       hwmon_device_unregister(rdev->pm.int_hwmon_dev);
}
1430 serge 632
 
/* Worker for the DPM thermal interrupt.
 *
 * Defaults to the internal thermal state; switches back to the user
 * state when the sensor reading has dropped below min_temp (or, with
 * no sensor callback, when the interrupt signalled a high-to-low
 * transition).  Updates thermal_active/state under pm.mutex and then
 * recomputes the clocks. */
static void radeon_dpm_thermal_work_handler(struct work_struct *work)
{
	struct radeon_device *rdev =
		container_of(work, struct radeon_device,
			     pm.dpm.thermal.work);
	/* switch to the thermal state */
	enum radeon_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;

	if (!rdev->pm.dpm_enabled)
		return;

	if (rdev->asic->pm.get_temperature) {
		int temp = radeon_get_temperature(rdev);

		if (temp < rdev->pm.dpm.thermal.min_temp)
			/* switch back the user state */
			dpm_state = rdev->pm.dpm.user_state;
	} else {
		if (rdev->pm.dpm.thermal.high_to_low)
			/* switch back the user state */
			dpm_state = rdev->pm.dpm.user_state;
	}
	mutex_lock(&rdev->pm.mutex);
	if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
		rdev->pm.dpm.thermal_active = true;
	else
		rdev->pm.dpm.thermal_active = false;
	rdev->pm.dpm.state = dpm_state;
	mutex_unlock(&rdev->pm.mutex);

	radeon_pm_compute_clocks(rdev);
}
665
 
/* Select the best DPM power state for the requested state type.
 *
 * Scans the parsed power state table, honoring single-display-only
 * states only when at most one crtc is active (and the vblank period
 * is long enough to reclock mclk).  When no state of the requested
 * type exists, the trailing switch degrades the request along a
 * fallback chain (UVD_SD -> UVD_HD -> ... -> PERFORMANCE, THERMAL ->
 * ACPI -> BATTERY -> PERFORMANCE) and restarts the search.  Returns
 * NULL only when even the fallbacks find nothing. */
static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
						     enum radeon_pm_state_type dpm_state)
{
	int i;
	struct radeon_ps *ps;
	u32 ui_class;
	bool single_display = (rdev->pm.dpm.new_active_crtc_count < 2) ?
		true : false;

	/* check if the vblank period is too short to adjust the mclk */
	if (single_display && rdev->asic->dpm.vblank_too_short) {
		if (radeon_dpm_vblank_too_short(rdev))
			single_display = false;
	}

	/* certain older asics have a separare 3D performance state,
	 * so try that first if the user selected performance
	 */
	if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
		dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
	/* balanced states don't exist at the moment */
	if (dpm_state == POWER_STATE_TYPE_BALANCED)
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;

restart_search:
	/* Pick the best power state based on current conditions */
	for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
		ps = &rdev->pm.dpm.ps[i];
		ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
		switch (dpm_state) {
		/* user states */
		case POWER_STATE_TYPE_BATTERY:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_BALANCED:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_PERFORMANCE:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		/* internal states */
		case POWER_STATE_TYPE_INTERNAL_UVD:
			if (rdev->pm.dpm.uvd_ps)
				return rdev->pm.dpm.uvd_ps;
			else
				break;
		case POWER_STATE_TYPE_INTERNAL_UVD_SD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_BOOT:
			return rdev->pm.dpm.boot_ps;
		case POWER_STATE_TYPE_INTERNAL_THERMAL:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ACPI:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ULV:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_3DPERF:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
				return ps;
			break;
		default:
			break;
		}
	}
	/* use a fallback state if we didn't match */
	switch (dpm_state) {
	case POWER_STATE_TYPE_INTERNAL_UVD_SD:
		dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_UVD_HD:
	case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
	case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
		if (rdev->pm.dpm.uvd_ps) {
			return rdev->pm.dpm.uvd_ps;
		} else {
			dpm_state = POWER_STATE_TYPE_PERFORMANCE;
			goto restart_search;
		}
	case POWER_STATE_TYPE_INTERNAL_THERMAL:
		dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_ACPI:
		dpm_state = POWER_STATE_TYPE_BATTERY;
		goto restart_search;
	case POWER_STATE_TYPE_BATTERY:
	case POWER_STATE_TYPE_BALANCED:
	case POWER_STATE_TYPE_INTERNAL_3DPERF:
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;
		goto restart_search;
	default:
		break;
	}

	return NULL;
}
799
 
/* Pick and program a new DPM power state (caller holds pm.mutex).
 *
 * Resolves any pending user-state change (unless overridden by an
 * active thermal or UVD state), picks the matching power state, and
 * short-circuits to a display-configuration-only update when the state
 * itself is unchanged.  Otherwise takes struct_mutex and ring_lock
 * (mclk_lock is disabled in this port), drains the rings, and runs the
 * asic pre/set/post power-state sequence, reapplying any forced
 * performance level afterwards. */
static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
{
	int i;
	struct radeon_ps *ps;
	enum radeon_pm_state_type dpm_state;
	int ret;

	/* if dpm init failed */
	if (!rdev->pm.dpm_enabled)
		return;

	if (rdev->pm.dpm.user_state != rdev->pm.dpm.state) {
		/* add other state override checks here */
		if ((!rdev->pm.dpm.thermal_active) &&
		    (!rdev->pm.dpm.uvd_active))
			rdev->pm.dpm.state = rdev->pm.dpm.user_state;
	}
	dpm_state = rdev->pm.dpm.state;

	ps = radeon_dpm_pick_power_state(rdev, dpm_state);
	if (ps)
		rdev->pm.dpm.requested_ps = ps;
	else
		return;

	/* no need to reprogram if nothing changed unless we are on BTC+ */
	if (rdev->pm.dpm.current_ps == rdev->pm.dpm.requested_ps) {
		/* vce just modifies an existing state so force a change */
		if (ps->vce_active != rdev->pm.dpm.vce_active)
			goto force;
		if ((rdev->family < CHIP_BARTS) || (rdev->flags & RADEON_IS_IGP)) {
			/* for pre-BTC and APUs if the num crtcs changed but state is the same,
			 * all we need to do is update the display configuration.
			 */
			if (rdev->pm.dpm.new_active_crtcs != rdev->pm.dpm.current_active_crtcs) {
				/* update display watermarks based on new power state */
				radeon_bandwidth_update(rdev);
				/* update displays */
				radeon_dpm_display_configuration_changed(rdev);
				rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
				rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
			}
			return;
		} else {
			/* for BTC+ if the num crtcs hasn't changed and state is the same,
			 * nothing to do, if the num crtcs is > 1 and state is the same,
			 * update display configuration.
			 */
			if (rdev->pm.dpm.new_active_crtcs ==
			    rdev->pm.dpm.current_active_crtcs) {
				return;
			} else {
				if ((rdev->pm.dpm.current_active_crtc_count > 1) &&
				    (rdev->pm.dpm.new_active_crtc_count > 1)) {
					/* update display watermarks based on new power state */
					radeon_bandwidth_update(rdev);
					/* update displays */
					radeon_dpm_display_configuration_changed(rdev);
					rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
					rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
					return;
				}
			}
		}
	}

force:
	if (radeon_dpm == 1) {
		printk("switching from power state:\n");
		radeon_dpm_print_power_state(rdev, rdev->pm.dpm.current_ps);
		printk("switching to power state:\n");
		radeon_dpm_print_power_state(rdev, rdev->pm.dpm.requested_ps);
	}

	mutex_lock(&rdev->ddev->struct_mutex);
//   down_write(&rdev->pm.mclk_lock);
	mutex_lock(&rdev->ring_lock);

	/* update whether vce is active */
	ps->vce_active = rdev->pm.dpm.vce_active;

	ret = radeon_dpm_pre_set_power_state(rdev);
	if (ret)
		goto done;

	/* update display watermarks based on new power state */
	radeon_bandwidth_update(rdev);
	/* update displays */
	radeon_dpm_display_configuration_changed(rdev);

	rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
	rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;

	/* wait for the rings to drain
	 * NOTE(review): the radeon_fence_wait_empty() return value is
	 * ignored here, unlike in radeon_pm_set_clocks() — confirm
	 * reclocking with a hung ring is acceptable on this path. */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		struct radeon_ring *ring = &rdev->ring[i];
		if (ring->ready)
			radeon_fence_wait_empty(rdev, i);
	}

	/* program the new power state */
	radeon_dpm_set_power_state(rdev);

	/* update current power state */
	rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps;

	radeon_dpm_post_set_power_state(rdev);

	if (rdev->asic->dpm.force_performance_level) {
		if (rdev->pm.dpm.thermal_active) {
			enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
			/* force low perf level for thermal */
			radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_LOW);
			/* save the user's level */
			rdev->pm.dpm.forced_level = level;
		} else {
			/* otherwise, user selected level */
			radeon_dpm_force_performance_level(rdev, rdev->pm.dpm.forced_level);
		}
	}

done:
	mutex_unlock(&rdev->ring_lock);
//   up_write(&rdev->pm.mclk_lock);
	mutex_unlock(&rdev->ddev->struct_mutex);
}
926
 
927
/*
 * radeon_dpm_enable_uvd - request a UVD-capable dpm power state.
 * @rdev:   radeon device
 * @enable: true when a UVD stream is starting, false when it stops
 *
 * Two strategies depending on the asic:
 *  - asics with a powergate_uvd callback gate the UVD block directly;
 *  - otherwise the dpm state machine is switched to/from an internal
 *    UVD state and a reclock is triggered via radeon_pm_compute_clocks().
 * All pm state is modified under rdev->pm.mutex.
 */
void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable)
{
	enum radeon_pm_state_type dpm_state;

	if (rdev->asic->dpm.powergate_uvd) {
		mutex_lock(&rdev->pm.mutex);
		/* don't powergate anything if we
		   have active but pause streams */
		enable |= rdev->pm.dpm.sd > 0;
		enable |= rdev->pm.dpm.hd > 0;
		/* enable/disable UVD */
		radeon_dpm_powergate_uvd(rdev, !enable);
		mutex_unlock(&rdev->pm.mutex);
	} else {
		if (enable) {
			mutex_lock(&rdev->pm.mutex);
			rdev->pm.dpm.uvd_active = true;
			/* disable this for now: per-stream-count UVD states
			 * are compiled out, every stream uses the generic
			 * internal UVD state */
#if 0
			if ((rdev->pm.dpm.sd == 1) && (rdev->pm.dpm.hd == 0))
				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_SD;
			else if ((rdev->pm.dpm.sd == 2) && (rdev->pm.dpm.hd == 0))
				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
			else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 1))
				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
			else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 2))
				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD2;
			else
#endif
				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD;
			rdev->pm.dpm.state = dpm_state;
			mutex_unlock(&rdev->pm.mutex);
		} else {
			mutex_lock(&rdev->pm.mutex);
			rdev->pm.dpm.uvd_active = false;
			mutex_unlock(&rdev->pm.mutex);
		}

		/* apply the new dpm state (must be called without pm.mutex held) */
		radeon_pm_compute_clocks(rdev);
	}
}
968
 
969
/*
 * radeon_dpm_enable_vce - mark VCE as active/inactive and reclock.
 * @rdev:   radeon device
 * @enable: true when a VCE workload starts, false when it ends
 *
 * Updates the dpm VCE bookkeeping under pm.mutex and then asks the
 * power-management core to pick a matching power state.
 */
void radeon_dpm_enable_vce(struct radeon_device *rdev, bool enable)
{
	mutex_lock(&rdev->pm.mutex);
	if (enable) {
		rdev->pm.dpm.vce_active = true;
		/* XXX select vce level based on ring/task */
		rdev->pm.dpm.vce_level = RADEON_VCE_LEVEL_AC_ALL;
	} else {
		rdev->pm.dpm.vce_active = false;
	}
	mutex_unlock(&rdev->pm.mutex);

	radeon_pm_compute_clocks(rdev);
}
985
 
986
static void radeon_pm_suspend_old(struct radeon_device *rdev)
987
{
1963 serge 988
	mutex_lock(&rdev->pm.mutex);
989
	if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
990
		if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE)
991
			rdev->pm.dynpm_state = DYNPM_STATE_SUSPENDED;
992
	}
993
	mutex_unlock(&rdev->pm.mutex);
994
 
995
}
996
 
5078 serge 997
/*
 * radeon_pm_suspend_dpm - suspend hook for the dpm path.
 *
 * Disables dpm, rewinds current/requested power state to the boot
 * state (asic re-init on resume starts from there) and clears the
 * dpm_enabled flag. All under pm.mutex.
 */
static void radeon_pm_suspend_dpm(struct radeon_device *rdev)
{
	mutex_lock(&rdev->pm.mutex);
	/* disable dpm */
	radeon_dpm_disable(rdev);
	/* reset the power state */
	rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
	rdev->pm.dpm_enabled = false;
	mutex_unlock(&rdev->pm.mutex);
}
1007
 
1008
void radeon_pm_suspend(struct radeon_device *rdev)
1009
{
1010
	if (rdev->pm.pm_method == PM_METHOD_DPM)
1011
		radeon_pm_suspend_dpm(rdev);
1012
	else
1013
		radeon_pm_suspend_old(rdev);
1014
}
1015
 
1016
/*
 * radeon_pm_resume_old - resume hook for the legacy (non-dpm) pm path.
 *
 * Restores default voltages/clocks (BARTS..CAYMAN need the MC ucode
 * loaded for that), resets the cached pm state to the defaults that
 * asic init just programmed, restarts dynpm if it was suspended, and
 * finally triggers a reclock.
 */
static void radeon_pm_resume_old(struct radeon_device *rdev)
{
	/* set up the default clocks if the MC ucode is loaded */
	if ((rdev->family >= CHIP_BARTS) &&
	    (rdev->family <= CHIP_CAYMAN) &&
	    rdev->mc_fw) {
		if (rdev->pm.default_vddc)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
						SET_VOLTAGE_TYPE_ASIC_VDDC);
		if (rdev->pm.default_vddci)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
						SET_VOLTAGE_TYPE_ASIC_VDDCI);
		if (rdev->pm.default_sclk)
			radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
		if (rdev->pm.default_mclk)
			radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
	}
	/* asic init will reset the default power state */
	mutex_lock(&rdev->pm.mutex);
	rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
	rdev->pm.current_clock_mode_index = 0;
	rdev->pm.current_sclk = rdev->pm.default_sclk;
	rdev->pm.current_mclk = rdev->pm.default_mclk;
	/* power_state may be absent if the bios parse failed */
	if (rdev->pm.power_state) {
		rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
		rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci;
	}
	if (rdev->pm.pm_method == PM_METHOD_DYNPM
	    && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) {
		rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
		/* idle work is stubbed out in this port (no workqueues) */
//		schedule_delayed_work(&rdev->pm.dynpm_idle_work,
//					msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
	}
	mutex_unlock(&rdev->pm.mutex);
	radeon_pm_compute_clocks(rdev);
}
1052
 
5078 serge 1053
/*
 * radeon_pm_resume_dpm - resume hook for the dpm path.
 *
 * Re-enables dpm from the boot state. On failure, falls back to
 * programming the vbios default voltages/clocks (MC ucode required
 * on BARTS..CAYMAN) and leaves dpm disabled.
 */
static void radeon_pm_resume_dpm(struct radeon_device *rdev)
{
	int ret;

	/* asic init will reset to the boot state */
	mutex_lock(&rdev->pm.mutex);
	rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
	radeon_dpm_setup_asic(rdev);
	ret = radeon_dpm_enable(rdev);
	mutex_unlock(&rdev->pm.mutex);
	if (ret)
		goto dpm_resume_fail;
	rdev->pm.dpm_enabled = true;
	return;

dpm_resume_fail:
	DRM_ERROR("radeon: dpm resume failed\n");
	if ((rdev->family >= CHIP_BARTS) &&
	    (rdev->family <= CHIP_CAYMAN) &&
	    rdev->mc_fw) {
		if (rdev->pm.default_vddc)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
						SET_VOLTAGE_TYPE_ASIC_VDDC);
		if (rdev->pm.default_vddci)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
						SET_VOLTAGE_TYPE_ASIC_VDDCI);
		if (rdev->pm.default_sclk)
			radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
		if (rdev->pm.default_mclk)
			radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
	}
}
1085
 
1086
void radeon_pm_resume(struct radeon_device *rdev)
1087
{
1088
	if (rdev->pm.pm_method == PM_METHOD_DPM)
1089
		radeon_pm_resume_dpm(rdev);
1090
	else
1091
		radeon_pm_resume_old(rdev);
1092
}
1093
 
1094
/*
 * radeon_pm_init_old - initialize the legacy (profile/dynpm) pm path.
 *
 * Seeds pm defaults, parses the power tables from the vbios
 * (atom or combios), programs default voltages/clocks where the MC
 * ucode allows it, and sets up the internal thermal sensor.
 * Returns 0 on success or the radeon_hwmon_init() error code.
 */
static int radeon_pm_init_old(struct radeon_device *rdev)
{
	int ret;

	rdev->pm.profile = PM_PROFILE_DEFAULT;
	rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
	rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
	rdev->pm.dynpm_can_upclock = true;
	rdev->pm.dynpm_can_downclock = true;
	rdev->pm.default_sclk = rdev->clock.default_sclk;
	rdev->pm.default_mclk = rdev->clock.default_mclk;
	rdev->pm.current_sclk = rdev->clock.default_sclk;
	rdev->pm.current_mclk = rdev->clock.default_mclk;
	rdev->pm.int_thermal_type = THERMAL_TYPE_NONE;

	if (rdev->bios) {
		if (rdev->is_atom_bios)
			radeon_atombios_get_power_modes(rdev);
		else
			radeon_combios_get_power_modes(rdev);
		radeon_pm_print_states(rdev);
		radeon_pm_init_profile(rdev);
		/* set up the default clocks if the MC ucode is loaded */
		if ((rdev->family >= CHIP_BARTS) &&
		    (rdev->family <= CHIP_CAYMAN) &&
		    rdev->mc_fw) {
			if (rdev->pm.default_vddc)
				radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
							SET_VOLTAGE_TYPE_ASIC_VDDC);
			if (rdev->pm.default_vddci)
				radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
							SET_VOLTAGE_TYPE_ASIC_VDDCI);
			if (rdev->pm.default_sclk)
				radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
			if (rdev->pm.default_mclk)
				radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
		}
	}

	/* set up the internal thermal sensor if applicable */
	ret = radeon_hwmon_init(rdev);
	if (ret)
		return ret;

	/* dynpm idle work is stubbed out in this port (no workqueues) */
//	INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler);

	if (rdev->pm.num_power_states > 1) {
		/* where's the best place to put these? */

		DRM_INFO("radeon: power management initialized\n");
	}

	return 0;
}
1149
 
5078 serge 1150
static void radeon_dpm_print_power_states(struct radeon_device *rdev)
1963 serge 1151
{
5078 serge 1152
	int i;
1153
 
1154
	for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
1155
		printk("== power state %d ==\n", i);
1156
		radeon_dpm_print_power_state(rdev, &rdev->pm.dpm.ps[i]);
1157
	}
1158
}
1159
 
1160
/*
 * radeon_pm_init_dpm - initialize the dpm pm path.
 *
 * Seeds dpm defaults (balanced state, auto perf level), parses the
 * atombios power tables (combios asics cannot use dpm: -EINVAL),
 * sets up the thermal sensor and work handler, then initializes and
 * enables dpm under pm.mutex. On enable failure the vbios default
 * voltages/clocks are restored and the error is returned.
 */
static int radeon_pm_init_dpm(struct radeon_device *rdev)
{
	int ret;

	/* default to balanced state */
	rdev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
	rdev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
	rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
	rdev->pm.default_sclk = rdev->clock.default_sclk;
	rdev->pm.default_mclk = rdev->clock.default_mclk;
	rdev->pm.current_sclk = rdev->clock.default_sclk;
	rdev->pm.current_mclk = rdev->clock.default_mclk;
	rdev->pm.int_thermal_type = THERMAL_TYPE_NONE;

	if (rdev->bios && rdev->is_atom_bios)
		radeon_atombios_get_power_modes(rdev);
	else
		return -EINVAL;

	/* set up the internal thermal sensor if applicable */
	ret = radeon_hwmon_init(rdev);
	if (ret)
		return ret;

	INIT_WORK(&rdev->pm.dpm.thermal.work, radeon_dpm_thermal_work_handler);
	mutex_lock(&rdev->pm.mutex);
	radeon_dpm_init(rdev);
	rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
	if (radeon_dpm == 1)
		radeon_dpm_print_power_states(rdev);
	radeon_dpm_setup_asic(rdev);
	ret = radeon_dpm_enable(rdev);
	mutex_unlock(&rdev->pm.mutex);
	if (ret)
		goto dpm_failed;
	rdev->pm.dpm_enabled = true;

	DRM_INFO("radeon: dpm initialized\n");

	return 0;

dpm_failed:
	rdev->pm.dpm_enabled = false;
	/* fall back to the vbios defaults (MC ucode needed on BARTS..CAYMAN) */
	if ((rdev->family >= CHIP_BARTS) &&
	    (rdev->family <= CHIP_CAYMAN) &&
	    rdev->mc_fw) {
		if (rdev->pm.default_vddc)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
						SET_VOLTAGE_TYPE_ASIC_VDDC);
		if (rdev->pm.default_vddci)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
						SET_VOLTAGE_TYPE_ASIC_VDDCI);
		if (rdev->pm.default_sclk)
			radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
		if (rdev->pm.default_mclk)
			radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
	}
	DRM_ERROR("radeon: dpm initialization failed\n");
	return ret;
}
1220
 
1221
/*
 * radeon_pm_init - choose and initialize a pm method for this asic.
 *
 * rv6xx+ asics can use dpm; dpm needs the RLC firmware, and RV770+
 * discrete GPUs additionally need the SMC firmware. The first family
 * group defaults to the profile method unless radeon_dpm == 1; the
 * second (newer) group defaults to dpm unless radeon_dpm == 0.
 * Everything else falls back to the profile method.
 */
int radeon_pm_init(struct radeon_device *rdev)
{
	/* enable dpm on rv6xx+ */
	switch (rdev->family) {
	case CHIP_RV610:
	case CHIP_RV630:
	case CHIP_RV620:
	case CHIP_RV635:
	case CHIP_RV670:
	case CHIP_RS780:
	case CHIP_RS880:
	case CHIP_RV770:
		/* DPM requires the RLC, RV770+ dGPU requires SMC */
		if (!rdev->rlc_fw)
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		else if ((rdev->family >= CHIP_RV770) &&
			 (!(rdev->flags & RADEON_IS_IGP)) &&
			 (!rdev->smc_fw))
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		else if (radeon_dpm == 1)
			rdev->pm.pm_method = PM_METHOD_DPM;
		else
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		break;
	case CHIP_RV730:
	case CHIP_RV710:
	case CHIP_RV740:
	case CHIP_CEDAR:
	case CHIP_REDWOOD:
	case CHIP_JUNIPER:
	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
	case CHIP_PALM:
	case CHIP_SUMO:
	case CHIP_SUMO2:
	case CHIP_BARTS:
	case CHIP_TURKS:
	case CHIP_CAICOS:
	case CHIP_CAYMAN:
	case CHIP_ARUBA:
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
	case CHIP_HAINAN:
	case CHIP_BONAIRE:
	case CHIP_KABINI:
	case CHIP_KAVERI:
	case CHIP_HAWAII:
	case CHIP_MULLINS:
		/* DPM requires the RLC, RV770+ dGPU requires SMC */
		if (!rdev->rlc_fw)
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		else if ((rdev->family >= CHIP_RV770) &&
			 (!(rdev->flags & RADEON_IS_IGP)) &&
			 (!rdev->smc_fw))
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		else if (radeon_dpm == 0)
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		else
			rdev->pm.pm_method = PM_METHOD_DPM;
		break;
	default:
		/* default to profile method */
		rdev->pm.pm_method = PM_METHOD_PROFILE;
		break;
	}

	if (rdev->pm.pm_method == PM_METHOD_DPM)
		return radeon_pm_init_dpm(rdev);
	else
		return radeon_pm_init_old(rdev);
}
1294
 
1295
int radeon_pm_late_init(struct radeon_device *rdev)
1296
{
1297
	int ret = 0;
1298
 
1299
	if (rdev->pm.pm_method == PM_METHOD_DPM) {
1300
		mutex_lock(&rdev->pm.mutex);
1301
		ret = radeon_dpm_late_enable(rdev);
1302
		mutex_unlock(&rdev->pm.mutex);
1303
	}
1304
	return ret;
1305
}
1306
 
1307
static void radeon_pm_fini_old(struct radeon_device *rdev)
1308
{
1963 serge 1309
	if (rdev->pm.num_power_states > 1) {
1310
		mutex_lock(&rdev->pm.mutex);
1311
		if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
1312
			rdev->pm.profile = PM_PROFILE_DEFAULT;
1313
			radeon_pm_update_profile(rdev);
1314
			radeon_pm_set_clocks(rdev);
1315
		} else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
1316
			/* reset default clocks */
1317
			rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
1318
			rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
1319
			radeon_pm_set_clocks(rdev);
1320
		}
1321
		mutex_unlock(&rdev->pm.mutex);
1322
 
1323
//		cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
1324
 
5078 serge 1325
   }
1963 serge 1326
 
1327
	radeon_hwmon_fini(rdev);
5078 serge 1328
		kfree(rdev->pm.power_state);
1963 serge 1329
}
1330
 
5078 serge 1331
/*
 * radeon_pm_fini_dpm - tear down the dpm pm path.
 *
 * Disables dpm under pm.mutex (only when more than one power state
 * was parsed), then releases dpm, the thermal sensor and the parsed
 * power-state table.
 */
static void radeon_pm_fini_dpm(struct radeon_device *rdev)
{
	if (rdev->pm.num_power_states > 1) {
		mutex_lock(&rdev->pm.mutex);
		radeon_dpm_disable(rdev);
		mutex_unlock(&rdev->pm.mutex);
	}
	radeon_dpm_fini(rdev);

	radeon_hwmon_fini(rdev);
	kfree(rdev->pm.power_state);
}
1343
 
1344
void radeon_pm_fini(struct radeon_device *rdev)
1345
{
1346
	if (rdev->pm.pm_method == PM_METHOD_DPM)
1347
		radeon_pm_fini_dpm(rdev);
1348
	else
1349
		radeon_pm_fini_old(rdev);
1350
}
1351
 
1352
/*
 * radeon_pm_compute_clocks_old - reclock decision for the legacy pm path.
 *
 * Recounts the enabled crtcs, then either re-applies the current
 * profile or drives the dynpm state machine:
 *  - >1 active crtc: pause dynpm and return to default clocks
 *    (reclocking with multiple active heads is avoided);
 *  - 1 active crtc: leave/resume the ACTIVE state, upclocking when
 *    coming from MINIMUM;
 *  - 0 active crtcs: drop to the MINIMUM state.
 * All decisions are made under pm.mutex. The delayed idle work used
 * upstream is stubbed out in this port.
 */
static void radeon_pm_compute_clocks_old(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;

	/* nothing to choose between with fewer than two states */
	if (rdev->pm.num_power_states < 2)
		return;

	mutex_lock(&rdev->pm.mutex);

	rdev->pm.active_crtcs = 0;
	rdev->pm.active_crtc_count = 0;
	if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc,
			&ddev->mode_config.crtc_list, head) {
			radeon_crtc = to_radeon_crtc(crtc);
			if (radeon_crtc->enabled) {
				rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
				rdev->pm.active_crtc_count++;
			}
		}
	}

	if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
		radeon_pm_update_profile(rdev);
		radeon_pm_set_clocks(rdev);
	} else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
		if (rdev->pm.dynpm_state != DYNPM_STATE_DISABLED) {
			if (rdev->pm.active_crtc_count > 1) {
				if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
//					cancel_delayed_work(&rdev->pm.dynpm_idle_work);

					rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
					radeon_pm_get_dynpm_state(rdev);
					radeon_pm_set_clocks(rdev);

					DRM_DEBUG_DRIVER("radeon: dynamic power management deactivated\n");
				}
			} else if (rdev->pm.active_crtc_count == 1) {
				/* TODO: Increase clocks if needed for current mode */

				if (rdev->pm.dynpm_state == DYNPM_STATE_MINIMUM) {
					rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_UPCLOCK;
					radeon_pm_get_dynpm_state(rdev);
					radeon_pm_set_clocks(rdev);

//					schedule_delayed_work(&rdev->pm.dynpm_idle_work,
//							   msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
				} else if (rdev->pm.dynpm_state == DYNPM_STATE_PAUSED) {
					rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
//					schedule_delayed_work(&rdev->pm.dynpm_idle_work,
//							   msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
					DRM_DEBUG_DRIVER("radeon: dynamic power management activated\n");
				}
			} else { /* count == 0 */
				if (rdev->pm.dynpm_state != DYNPM_STATE_MINIMUM) {
//					cancel_delayed_work(&rdev->pm.dynpm_idle_work);

					rdev->pm.dynpm_state = DYNPM_STATE_MINIMUM;
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_MINIMUM;
					radeon_pm_get_dynpm_state(rdev);
					radeon_pm_set_clocks(rdev);
				}
			}
		}
	}

	mutex_unlock(&rdev->pm.mutex);
}
1424
 
5078 serge 1425
/*
 * radeon_pm_compute_clocks_dpm - reclock decision for the dpm path.
 *
 * Recounts the enabled crtcs into new_active_crtcs/count, refreshes
 * the AC/battery status and asks the dpm core to transition to a
 * matching power state. All under pm.mutex; a no-op until dpm has
 * been enabled.
 */
static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;

	if (!rdev->pm.dpm_enabled)
		return;

	mutex_lock(&rdev->pm.mutex);

	/* update active crtc counts */
	rdev->pm.dpm.new_active_crtcs = 0;
	rdev->pm.dpm.new_active_crtc_count = 0;
	if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc,
				    &ddev->mode_config.crtc_list, head) {
			radeon_crtc = to_radeon_crtc(crtc);
			if (crtc->enabled) {
				rdev->pm.dpm.new_active_crtcs |= (1 << radeon_crtc->crtc_id);
				rdev->pm.dpm.new_active_crtc_count++;
			}
		}
	}

	/* update battery/ac status */
	if (power_supply_is_system_supplied() > 0)
		rdev->pm.dpm.ac_power = true;
	else
		rdev->pm.dpm.ac_power = false;

	radeon_dpm_change_power_state_locked(rdev);

	mutex_unlock(&rdev->pm.mutex);

}
1461
 
1462
void radeon_pm_compute_clocks(struct radeon_device *rdev)
1463
{
1464
	if (rdev->pm.pm_method == PM_METHOD_DPM)
1465
		radeon_pm_compute_clocks_dpm(rdev);
1466
	else
1467
		radeon_pm_compute_clocks_old(rdev);
1468
}
1469
 
1963 serge 1470
/*
 * radeon_pm_in_vbl - check whether all active crtcs are in vblank.
 *
 * Returns true only when no active crtc reports a valid scanout
 * position outside vblank; a reclock is safe only in that window.
 */
static bool radeon_pm_in_vbl(struct radeon_device *rdev)
{
	int  crtc, vpos, hpos, vbl_status;
	bool in_vbl = true;

	/* Iterate over all active crtc's. All crtc's must be in vblank,
	 * otherwise return in_vbl == false.
	 */
	for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) {
		if (rdev->pm.active_crtcs & (1 << crtc)) {
			vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, 0, &vpos, &hpos, NULL, NULL);
			/* a crtc with a valid position that is NOT in vblank
			 * vetoes the whole check */
			if ((vbl_status & DRM_SCANOUTPOS_VALID) &&
			    !(vbl_status & DRM_SCANOUTPOS_IN_VBLANK))
				in_vbl = false;
		}
	}

	return in_vbl;
}
1489
 
1963 serge 1490
/*
 * radeon_pm_debug_check_in_vbl - radeon_pm_in_vbl() with debug logging.
 * @finish: true when checking on exit of a pm change, false on entry
 *
 * Logs a debug message when a pm change is attempted outside vblank.
 * (stat_crtc is only a placeholder in the message and is always 0.)
 */
static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish)
{
	u32 stat_crtc = 0;
	bool in_vbl = radeon_pm_in_vbl(rdev);

	if (!in_vbl)
		DRM_DEBUG_DRIVER("not in vbl for pm change %08x at %s\n", stat_crtc,
			 finish ? "exit" : "entry");

	return in_vbl;
}
1500
 
1501
 
1268 serge 1502
/*
1503
 * Debugfs info
1504
 */
1505
#if defined(CONFIG_DEBUG_FS)
1506
 
1507
/*
 * radeon_debugfs_pm_info - debugfs dump of the current pm state.
 *
 * Three cases: a PX asic that is currently powered off, a dpm-managed
 * asic (delegates to the asic's own dump callback under pm.mutex),
 * or the legacy path where clocks/voltage are printed directly.
 * Note the "%u0 kHz" format: clocks are stored in units of 10 kHz.
 */
static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct drm_device *ddev = rdev->ddev;

	if  ((rdev->flags & RADEON_IS_PX) &&
	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
		seq_printf(m, "PX asic powered off\n");
	} else if (rdev->pm.dpm_enabled) {
		mutex_lock(&rdev->pm.mutex);
		if (rdev->asic->dpm.debugfs_print_current_performance_level)
			radeon_dpm_debugfs_print_current_performance_level(rdev, m);
		else
			seq_printf(m, "Debugfs support not implemented for this asic\n");
		mutex_unlock(&rdev->pm.mutex);
	} else {
		seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk);
		/* radeon_get_engine_clock is not reliable on APUs so just print the current clock */
		if ((rdev->family >= CHIP_PALM) && (rdev->flags & RADEON_IS_IGP))
			seq_printf(m, "current engine clock: %u0 kHz\n", rdev->pm.current_sclk);
		else
			seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
		seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk);
		if (rdev->asic->pm.get_memory_clock)
			seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
		if (rdev->pm.current_vddc)
			seq_printf(m, "voltage: %u mV\n", rdev->pm.current_vddc);
		if (rdev->asic->pm.get_pcie_lanes)
			seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev));
	}

	return 0;
}
1542
 
1543
/* Debugfs entries registered by radeon_debugfs_pm_init(). */
static struct drm_info_list radeon_pm_info_list[] = {
	{"radeon_pm_info", radeon_debugfs_pm_info, 0, NULL},
};
1546
#endif
1547
 
1430 serge 1548
/*
 * radeon_debugfs_pm_init - register the pm debugfs files.
 * Compiles to a no-op (returns 0) when CONFIG_DEBUG_FS is disabled.
 */
static int radeon_debugfs_pm_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_pm_info_list, ARRAY_SIZE(radeon_pm_info_list));
#else
	return 0;
#endif
}