Subversion Repositories Kolibri OS

Rev

Rev 5346 | Rev 6321 | Go to most recent revision | Details | Compare with Previous | Last modification | View Log | RSS feed

Rev Author Line No. Line
1268 serge 1
/*
2
 * Permission is hereby granted, free of charge, to any person obtaining a
3
 * copy of this software and associated documentation files (the "Software"),
4
 * to deal in the Software without restriction, including without limitation
5
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
6
 * and/or sell copies of the Software, and to permit persons to whom the
7
 * Software is furnished to do so, subject to the following conditions:
8
 *
9
 * The above copyright notice and this permission notice shall be included in
10
 * all copies or substantial portions of the Software.
11
 *
12
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
13
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
15
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
16
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
17
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
18
 * OTHER DEALINGS IN THE SOFTWARE.
19
 *
20
 * Authors: Rafał Miłecki 
1430 serge 21
 *          Alex Deucher 
1268 serge 22
 */
2997 Serge 23
#include <drm/drmP.h>
#include "radeon.h"
#include "avivod.h"
#include "atom.h"
#include "r600_dpm.h"
1268 serge 28
 
1430 serge 29
/* Polling interval for the dynpm idle work handler (ms). */
#define RADEON_IDLE_LOOP_MS 100
/* Delay before a dynpm reclock actually takes effect (ms). */
#define RADEON_RECLOCK_DELAY_MS 200
/* Upper bound on waiting for a vblank before reclocking anyway (ms). */
#define RADEON_WAIT_VBLANK_TIMEOUT 200
1268 serge 32
 
1963 serge 33
/* Human-readable labels indexed by power-state type (used by
 * radeon_pm_print_states(); index 0 is the unnamed/default type). */
static const char *radeon_pm_state_type_name[5] = {
	"",
	"Powersave",
	"Battery",
	"Balanced",
	"Performance",
};
40
 
1963 serge 41
static void radeon_dynpm_idle_work_handler(struct work_struct *work);
42
static int radeon_debugfs_pm_init(struct radeon_device *rdev);
43
static bool radeon_pm_in_vbl(struct radeon_device *rdev);
44
static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish);
45
static void radeon_pm_update_profile(struct radeon_device *rdev);
46
static void radeon_pm_set_clocks(struct radeon_device *rdev);
47
 
2997 Serge 48
int radeon_pm_get_type_index(struct radeon_device *rdev,
49
			     enum radeon_pm_state_type ps_type,
50
			     int instance)
51
{
52
	int i;
53
	int found_instance = -1;
1963 serge 54
 
2997 Serge 55
	for (i = 0; i < rdev->pm.num_power_states; i++) {
56
		if (rdev->pm.power_state[i].type == ps_type) {
57
			found_instance++;
58
			if (found_instance == instance)
59
				return i;
60
		}
61
	}
62
	/* return default if no match */
63
	return rdev->pm.default_power_state_index;
64
}
1963 serge 65
 
2997 Serge 66
void radeon_pm_acpi_event_handler(struct radeon_device *rdev)
1430 serge 67
{
5078 serge 68
	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
69
		mutex_lock(&rdev->pm.mutex);
70
		if (power_supply_is_system_supplied() > 0)
71
			rdev->pm.dpm.ac_power = true;
72
		else
73
			rdev->pm.dpm.ac_power = false;
74
		if (rdev->family == CHIP_ARUBA) {
6104 serge 75
			if (rdev->asic->dpm.enable_bapm)
76
				radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power);
5078 serge 77
		}
78
		mutex_unlock(&rdev->pm.mutex);
79
        } else if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
6104 serge 80
		if (rdev->pm.profile == PM_PROFILE_AUTO) {
81
			mutex_lock(&rdev->pm.mutex);
82
			radeon_pm_update_profile(rdev);
83
			radeon_pm_set_clocks(rdev);
84
			mutex_unlock(&rdev->pm.mutex);
1430 serge 85
		}
86
	}
87
}
88
 
1963 serge 89
static void radeon_pm_update_profile(struct radeon_device *rdev)
1430 serge 90
{
1963 serge 91
	switch (rdev->pm.profile) {
92
	case PM_PROFILE_DEFAULT:
93
		rdev->pm.profile_index = PM_PROFILE_DEFAULT_IDX;
1430 serge 94
		break;
1963 serge 95
	case PM_PROFILE_AUTO:
96
		if (power_supply_is_system_supplied() > 0) {
97
			if (rdev->pm.active_crtc_count > 1)
98
				rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
99
			else
100
				rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
1430 serge 101
		} else {
1963 serge 102
			if (rdev->pm.active_crtc_count > 1)
103
				rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
104
			else
105
				rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
1430 serge 106
		}
107
		break;
1963 serge 108
	case PM_PROFILE_LOW:
109
		if (rdev->pm.active_crtc_count > 1)
110
			rdev->pm.profile_index = PM_PROFILE_LOW_MH_IDX;
111
		else
112
			rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX;
1430 serge 113
		break;
1963 serge 114
	case PM_PROFILE_MID:
115
		if (rdev->pm.active_crtc_count > 1)
116
			rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
117
		else
118
			rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
119
		break;
120
	case PM_PROFILE_HIGH:
121
		if (rdev->pm.active_crtc_count > 1)
122
			rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
123
		else
124
			rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
125
		break;
1430 serge 126
	}
127
 
1963 serge 128
	if (rdev->pm.active_crtc_count == 0) {
129
		rdev->pm.requested_power_state_index =
130
			rdev->pm.profiles[rdev->pm.profile_index].dpms_off_ps_idx;
131
		rdev->pm.requested_clock_mode_index =
132
			rdev->pm.profiles[rdev->pm.profile_index].dpms_off_cm_idx;
133
	} else {
134
		rdev->pm.requested_power_state_index =
135
			rdev->pm.profiles[rdev->pm.profile_index].dpms_on_ps_idx;
136
		rdev->pm.requested_clock_mode_index =
137
			rdev->pm.profiles[rdev->pm.profile_index].dpms_on_cm_idx;
6104 serge 138
	}
1963 serge 139
}
140
 
141
static void radeon_unmap_vram_bos(struct radeon_device *rdev)
142
{
143
	struct radeon_bo *bo, *n;
144
 
145
	if (list_empty(&rdev->gem.objects))
146
		return;
147
 
5078 serge 148
	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
149
		if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
150
			ttm_bo_unmap_virtual(&bo->tbo);
151
	}
1963 serge 152
}
153
 
2997 Serge 154
static void radeon_sync_with_vblank(struct radeon_device *rdev)
155
{
156
	if (rdev->pm.active_crtcs) {
157
		rdev->pm.vblank_sync = false;
6104 serge 158
		wait_event_timeout(
159
			rdev->irq.vblank_queue, rdev->pm.vblank_sync,
160
			msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT));
161
	}
2997 Serge 162
}
1963 serge 163
 
164
/**
 * radeon_set_power_state - program the requested engine/memory clocks
 * @rdev: radeon device
 *
 * Applies the requested power state / clock mode if it differs from the
 * current one and the GUI engine is idle.  Ordering is deliberate:
 * voltage/lane changes ("misc") happen before raising clocks and after
 * lowering them, and clock switches are bracketed by vblank checks.
 * Caller must hold rdev->pm.mutex.
 */
static void radeon_set_power_state(struct radeon_device *rdev)
{
	u32 sclk, mclk;
	bool misc_after = false;

	/* nothing to do if the requested state is already programmed */
	if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
	    (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
		return;

	if (radeon_gui_idle(rdev)) {
		/* clamp the engine clock to the default maximum */
		sclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
			clock_info[rdev->pm.requested_clock_mode_index].sclk;
		if (sclk > rdev->pm.default_sclk)
			sclk = rdev->pm.default_sclk;

		/* starting with BTC, there is one state that is used for both
		 * MH and SH.  Difference is that we always use the high clock index for
		 * mclk and vddci.
		 */
		if ((rdev->pm.pm_method == PM_METHOD_PROFILE) &&
		    (rdev->family >= CHIP_BARTS) &&
		    rdev->pm.active_crtc_count &&
		    ((rdev->pm.profile_index == PM_PROFILE_MID_MH_IDX) ||
		     (rdev->pm.profile_index == PM_PROFILE_LOW_MH_IDX)))
			mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
				clock_info[rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx].mclk;
		else
			mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
				clock_info[rdev->pm.requested_clock_mode_index].mclk;

		/* clamp the memory clock to the default maximum */
		if (mclk > rdev->pm.default_mclk)
			mclk = rdev->pm.default_mclk;

		/* upvolt before raising clocks, downvolt after lowering clocks */
		if (sclk < rdev->pm.current_sclk)
			misc_after = true;

		radeon_sync_with_vblank(rdev);

		/* dynpm only reclocks inside the vblank interval; bail if
		 * we missed it */
		if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
			if (!radeon_pm_in_vbl(rdev))
				return;
		}

		radeon_pm_prepare(rdev);

		if (!misc_after)
			/* voltage, pcie lanes, etc.*/
			radeon_pm_misc(rdev);

		/* set engine clock */
		if (sclk != rdev->pm.current_sclk) {
			radeon_pm_debug_check_in_vbl(rdev, false);
			radeon_set_engine_clock(rdev, sclk);
			radeon_pm_debug_check_in_vbl(rdev, true);
			rdev->pm.current_sclk = sclk;
			DRM_DEBUG_DRIVER("Setting: e: %d\n", sclk);
		}

		/* set memory clock */
		if (rdev->asic->pm.set_memory_clock && (mclk != rdev->pm.current_mclk)) {
			radeon_pm_debug_check_in_vbl(rdev, false);
			radeon_set_memory_clock(rdev, mclk);
			radeon_pm_debug_check_in_vbl(rdev, true);
			rdev->pm.current_mclk = mclk;
			DRM_DEBUG_DRIVER("Setting: m: %d\n", mclk);
		}

		if (misc_after)
			/* voltage, pcie lanes, etc.*/
			radeon_pm_misc(rdev);

		radeon_pm_finish(rdev);

		rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index;
		rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index;
	} else
		DRM_DEBUG_DRIVER("pm: GUI not idle!!!\n");
}
243
 
244
/**
 * radeon_pm_set_clocks - quiesce the GPU and switch to the requested state
 * @rdev: radeon device
 *
 * Takes the mclk and ring locks, drains all rings, unmaps VRAM BOs,
 * holds vblank references while radeon_set_power_state() reprograms the
 * clocks, then refreshes the bandwidth/watermark info.  Lock order
 * (mclk_lock before ring_lock) mirrors the release order below and must
 * not change.  Caller must hold rdev->pm.mutex.
 */
static void radeon_pm_set_clocks(struct radeon_device *rdev)
{
	int i, r;

	/* no need to take locks, etc. if nothing's going to change */
	if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
	    (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
		return;

	down_write(&rdev->pm.mclk_lock);
	mutex_lock(&rdev->ring_lock);

	/* wait for the rings to drain */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		struct radeon_ring *ring = &rdev->ring[i];
		if (!ring->ready) {
			continue;
		}
		r = radeon_fence_wait_empty(rdev, i);
		if (r) {
			/* needs a GPU reset dont reset here */
			mutex_unlock(&rdev->ring_lock);
			up_write(&rdev->pm.mclk_lock);
			return;
		}
	}

	radeon_unmap_vram_bos(rdev);

	/* pin a vblank reference on every active CRTC so the interrupt
	 * keeps firing while we reclock */
	if (rdev->irq.installed) {
		for (i = 0; i < rdev->num_crtc; i++) {
			if (rdev->pm.active_crtcs & (1 << i)) {
				rdev->pm.req_vblank |= (1 << i);
				drm_vblank_get(rdev->ddev, i);
			}
		}
	}

	radeon_set_power_state(rdev);

	/* drop the vblank references taken above */
	if (rdev->irq.installed) {
		for (i = 0; i < rdev->num_crtc; i++) {
			if (rdev->pm.req_vblank & (1 << i)) {
				rdev->pm.req_vblank &= ~(1 << i);
				drm_vblank_put(rdev->ddev, i);
			}
		}
	}

	/* update display watermarks based on new power state */
	radeon_update_bandwidth_info(rdev);
	if (rdev->pm.active_crtc_count)
		radeon_bandwidth_update(rdev);

	rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;

	mutex_unlock(&rdev->ring_lock);
	up_write(&rdev->pm.mclk_lock);
}
303
 
1963 serge 304
static void radeon_pm_print_states(struct radeon_device *rdev)
1430 serge 305
{
1963 serge 306
	int i, j;
307
	struct radeon_power_state *power_state;
308
	struct radeon_pm_clock_info *clock_info;
309
 
310
	DRM_DEBUG_DRIVER("%d Power State(s)\n", rdev->pm.num_power_states);
311
	for (i = 0; i < rdev->pm.num_power_states; i++) {
312
		power_state = &rdev->pm.power_state[i];
313
		DRM_DEBUG_DRIVER("State %d: %s\n", i,
314
			radeon_pm_state_type_name[power_state->type]);
315
		if (i == rdev->pm.default_power_state_index)
316
			DRM_DEBUG_DRIVER("\tDefault");
317
		if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP))
318
			DRM_DEBUG_DRIVER("\t%d PCIE Lanes\n", power_state->pcie_lanes);
319
		if (power_state->flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
320
			DRM_DEBUG_DRIVER("\tSingle display only\n");
321
		DRM_DEBUG_DRIVER("\t%d Clock Mode(s)\n", power_state->num_clock_modes);
322
		for (j = 0; j < power_state->num_clock_modes; j++) {
323
			clock_info = &(power_state->clock_info[j]);
324
			if (rdev->flags & RADEON_IS_IGP)
2997 Serge 325
				DRM_DEBUG_DRIVER("\t\t%d e: %d\n",
6104 serge 326
						 j,
2997 Serge 327
						 clock_info->sclk * 10);
1963 serge 328
			else
2997 Serge 329
				DRM_DEBUG_DRIVER("\t\t%d e: %d\tm: %d\tv: %d\n",
6104 serge 330
						 j,
331
						 clock_info->sclk * 10,
332
						 clock_info->mclk * 10,
2997 Serge 333
						 clock_info->voltage.voltage);
1963 serge 334
		}
1430 serge 335
	}
1963 serge 336
}
1430 serge 337
 
1963 serge 338
static ssize_t radeon_get_pm_profile(struct device *dev,
339
				     struct device_attribute *attr,
340
				     char *buf)
341
{
5078 serge 342
	struct drm_device *ddev = dev_get_drvdata(dev);
2997 Serge 343
	struct radeon_device *rdev = ddev->dev_private;
344
	int cp = rdev->pm.profile;
1963 serge 345
 
2997 Serge 346
	return snprintf(buf, PAGE_SIZE, "%s\n",
347
			(cp == PM_PROFILE_AUTO) ? "auto" :
348
			(cp == PM_PROFILE_LOW) ? "low" :
349
			(cp == PM_PROFILE_MID) ? "mid" :
350
			(cp == PM_PROFILE_HIGH) ? "high" : "default");
1430 serge 351
}
352
 
1963 serge 353
static ssize_t radeon_set_pm_profile(struct device *dev,
354
				     struct device_attribute *attr,
355
				     const char *buf,
356
				     size_t count)
1430 serge 357
{
5078 serge 358
	struct drm_device *ddev = dev_get_drvdata(dev);
1963 serge 359
	struct radeon_device *rdev = ddev->dev_private;
360
 
5078 serge 361
	/* Can't set profile when the card is off */
362
	if  ((rdev->flags & RADEON_IS_PX) &&
363
	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
364
		return -EINVAL;
365
 
1963 serge 366
	mutex_lock(&rdev->pm.mutex);
2997 Serge 367
	if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
368
		if (strncmp("default", buf, strlen("default")) == 0)
6104 serge 369
			rdev->pm.profile = PM_PROFILE_DEFAULT;
2997 Serge 370
		else if (strncmp("auto", buf, strlen("auto")) == 0)
371
			rdev->pm.profile = PM_PROFILE_AUTO;
372
		else if (strncmp("low", buf, strlen("low")) == 0)
373
			rdev->pm.profile = PM_PROFILE_LOW;
374
		else if (strncmp("mid", buf, strlen("mid")) == 0)
375
			rdev->pm.profile = PM_PROFILE_MID;
376
		else if (strncmp("high", buf, strlen("high")) == 0)
377
			rdev->pm.profile = PM_PROFILE_HIGH;
378
		else {
379
			count = -EINVAL;
380
			goto fail;
381
		}
6104 serge 382
		radeon_pm_update_profile(rdev);
383
		radeon_pm_set_clocks(rdev);
2997 Serge 384
	} else
385
		count = -EINVAL;
386
 
1963 serge 387
fail:
388
	mutex_unlock(&rdev->pm.mutex);
389
 
390
	return count;
391
}
392
 
393
static ssize_t radeon_get_pm_method(struct device *dev,
394
				    struct device_attribute *attr,
395
				    char *buf)
396
{
5078 serge 397
	struct drm_device *ddev = dev_get_drvdata(dev);
1963 serge 398
	struct radeon_device *rdev = ddev->dev_private;
399
	int pm = rdev->pm.pm_method;
400
 
401
	return snprintf(buf, PAGE_SIZE, "%s\n",
5078 serge 402
			(pm == PM_METHOD_DYNPM) ? "dynpm" :
403
			(pm == PM_METHOD_PROFILE) ? "profile" : "dpm");
1963 serge 404
}
405
 
406
/**
 * radeon_set_pm_method - sysfs store handler for the PM method
 *
 * Accepts "dynpm" or "profile" (prefix match) and switches the legacy
 * PM method accordingly, then recomputes the clocks.  Switching away
 * from DPM is not supported; PX cards must be powered on.
 */
static ssize_t radeon_set_pm_method(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct radeon_device *rdev = ddev->dev_private;

	/* Can't set method when the card is off */
	if  ((rdev->flags & RADEON_IS_PX) &&
	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
		count = -EINVAL;
		goto fail;
	}

	/* we don't support the legacy modes with dpm */
	if (rdev->pm.pm_method == PM_METHOD_DPM) {
		count = -EINVAL;
		goto fail;
	}

	if (strncmp("dynpm", buf, strlen("dynpm")) == 0) {
		mutex_lock(&rdev->pm.mutex);
		rdev->pm.pm_method = PM_METHOD_DYNPM;
		/* paused until the idle work decides what to do */
		rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
		rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
		mutex_unlock(&rdev->pm.mutex);
	} else if (strncmp("profile", buf, strlen("profile")) == 0) {
		mutex_lock(&rdev->pm.mutex);
		/* disable dynpm */
		rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
		rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
		rdev->pm.pm_method = PM_METHOD_PROFILE;
		mutex_unlock(&rdev->pm.mutex);
		/* NOTE(port): upstream cancels the dynpm idle work here;
		 * disabled in this KolibriOS port. */
//		cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
	} else {
		count = -EINVAL;
		goto fail;
	}
	radeon_pm_compute_clocks(rdev);
fail:
	return count;
}
449
 
5078 serge 450
static ssize_t radeon_get_dpm_state(struct device *dev,
451
				    struct device_attribute *attr,
452
				    char *buf)
453
{
454
	struct drm_device *ddev = dev_get_drvdata(dev);
455
	struct radeon_device *rdev = ddev->dev_private;
456
	enum radeon_pm_state_type pm = rdev->pm.dpm.user_state;
2997 Serge 457
 
5078 serge 458
	return snprintf(buf, PAGE_SIZE, "%s\n",
459
			(pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
460
			(pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
461
}
462
 
463
/**
 * radeon_set_dpm_state - sysfs store handler for the DPM user state
 *
 * Accepts "battery", "balanced" or "performance" (prefix match), stores
 * the new user state under the PM mutex, then recomputes clocks unless
 * a PX card is currently powered off.
 */
static ssize_t radeon_set_dpm_state(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct radeon_device *rdev = ddev->dev_private;

	mutex_lock(&rdev->pm.mutex);
	if (strncmp("battery", buf, strlen("battery")) == 0)
		rdev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY;
	else if (strncmp("balanced", buf, strlen("balanced")) == 0)
		rdev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
	else if (strncmp("performance", buf, strlen("performance")) == 0)
		rdev->pm.dpm.user_state = POWER_STATE_TYPE_PERFORMANCE;
	else {
		/* unknown keyword: unlock before bailing out */
		mutex_unlock(&rdev->pm.mutex);
		count = -EINVAL;
		goto fail;
	}
	mutex_unlock(&rdev->pm.mutex);

	/* Can't set dpm state when the card is off */
	if (!(rdev->flags & RADEON_IS_PX) ||
	    (ddev->switch_power_state == DRM_SWITCH_POWER_ON))
		radeon_pm_compute_clocks(rdev);

fail:
	return count;
}
493
 
494
static ssize_t radeon_get_dpm_forced_performance_level(struct device *dev,
495
						       struct device_attribute *attr,
496
						       char *buf)
497
{
498
	struct drm_device *ddev = dev_get_drvdata(dev);
499
	struct radeon_device *rdev = ddev->dev_private;
500
	enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
501
 
502
	if  ((rdev->flags & RADEON_IS_PX) &&
503
	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
504
		return snprintf(buf, PAGE_SIZE, "off\n");
505
 
506
	return snprintf(buf, PAGE_SIZE, "%s\n",
507
			(level == RADEON_DPM_FORCED_LEVEL_AUTO) ? "auto" :
508
			(level == RADEON_DPM_FORCED_LEVEL_LOW) ? "low" : "high");
509
}
510
 
511
/**
 * radeon_set_dpm_forced_performance_level - sysfs store handler
 *
 * Accepts "low", "high" or "auto" (prefix match) and forwards the
 * forced level to the asic hook.  Rejected while a thermal event is
 * active or while a PX card is powered off.
 */
static ssize_t radeon_set_dpm_forced_performance_level(struct device *dev,
						       struct device_attribute *attr,
						       const char *buf,
						       size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct radeon_device *rdev = ddev->dev_private;
	enum radeon_dpm_forced_level level;
	int ret = 0;

	/* Can't force performance level when the card is off */
	if  ((rdev->flags & RADEON_IS_PX) &&
	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return -EINVAL;

	mutex_lock(&rdev->pm.mutex);
	if (strncmp("low", buf, strlen("low")) == 0) {
		level = RADEON_DPM_FORCED_LEVEL_LOW;
	} else if (strncmp("high", buf, strlen("high")) == 0) {
		level = RADEON_DPM_FORCED_LEVEL_HIGH;
	} else if (strncmp("auto", buf, strlen("auto")) == 0) {
		level = RADEON_DPM_FORCED_LEVEL_AUTO;
	} else {
		count = -EINVAL;
		goto fail;
	}
	if (rdev->asic->dpm.force_performance_level) {
		/* thermal handling owns the level while active */
		if (rdev->pm.dpm.thermal_active) {
			count = -EINVAL;
			goto fail;
		}
		ret = radeon_dpm_force_performance_level(rdev, level);
		if (ret)
			count = -EINVAL;
	}
fail:
	mutex_unlock(&rdev->pm.mutex);

	return count;
}
551
 
552
 
1963 serge 553
static ssize_t radeon_hwmon_show_temp(struct device *dev,
554
				      struct device_attribute *attr,
555
				      char *buf)
556
{
5078 serge 557
	struct radeon_device *rdev = dev_get_drvdata(dev);
558
	struct drm_device *ddev = rdev->ddev;
2997 Serge 559
	int temp;
1963 serge 560
 
5078 serge 561
	/* Can't get temperature when the card is off */
562
	if  ((rdev->flags & RADEON_IS_PX) &&
563
	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
564
		return -EINVAL;
565
 
566
	if (rdev->asic->pm.get_temperature)
567
		temp = radeon_get_temperature(rdev);
568
	else
569
		temp = 0;
570
 
571
	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
572
}
573
 
574
/**
 * radeon_hwmon_show_temp_thresh - hwmon show handler for temp thresholds
 *
 * NOTE(port): the hysteresis (min_temp) branch is disabled in this
 * KolibriOS port; only the critical threshold (max_temp) is reported.
 */
static ssize_t radeon_hwmon_show_temp_thresh(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	struct radeon_device *rdev = dev_get_drvdata(dev);
//	int hyst = to_sensor_dev_attr(attr)->index;
	int temp;

//	if (hyst)
//		temp = rdev->pm.dpm.thermal.min_temp;
//	else
		temp = rdev->pm.dpm.thermal.max_temp;

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}
589
 
590
 
591
/* hwmon sysfs attribute table.  NOTE(port): the sensor attributes are
 * disabled in this KolibriOS port, so only the NULL terminator remains. */
static struct attribute *hwmon_attributes[] = {
//	&sensor_dev_attr_temp1_input.dev_attr.attr,
//	&sensor_dev_attr_temp1_crit.dev_attr.attr,
//	&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
	NULL
};
597
 
598
 
599
 
600
/**
 * radeon_hwmon_init - set up hwmon monitoring for internal sensors
 *
 * NOTE(port): hwmon device registration is stripped in this KolibriOS
 * port; the switch only checks that a temperature hook exists for asics
 * with an internal thermal sensor.  Always returns 0 here.
 */
static int radeon_hwmon_init(struct radeon_device *rdev)
{
	int err = 0;

	switch (rdev->pm.int_thermal_type) {
	case THERMAL_TYPE_RV6XX:
	case THERMAL_TYPE_RV770:
	case THERMAL_TYPE_EVERGREEN:
	case THERMAL_TYPE_NI:
	case THERMAL_TYPE_SUMO:
	case THERMAL_TYPE_SI:
	case THERMAL_TYPE_CI:
	case THERMAL_TYPE_KV:
		/* nothing to register without a sensor read hook */
		if (rdev->asic->pm.get_temperature == NULL)
			return err;

		break;
	default:
		break;
	}

	return err;
}
623
 
5078 serge 624
/* Tear down the hwmon device.  NOTE(port): registration is disabled in
 * this KolibriOS port, so this is intentionally a no-op stub. */
static void radeon_hwmon_fini(struct radeon_device *rdev)
{
//   if (rdev->pm.int_hwmon_dev)
//       hwmon_device_unregister(rdev->pm.int_hwmon_dev);
}
1430 serge 629
 
5078 serge 630
static void radeon_dpm_thermal_work_handler(struct work_struct *work)
1963 serge 631
{
5078 serge 632
	struct radeon_device *rdev =
633
		container_of(work, struct radeon_device,
634
			     pm.dpm.thermal.work);
635
	/* switch to the thermal state */
636
	enum radeon_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
1430 serge 637
 
5078 serge 638
	if (!rdev->pm.dpm_enabled)
639
		return;
1963 serge 640
 
5078 serge 641
	if (rdev->asic->pm.get_temperature) {
642
		int temp = radeon_get_temperature(rdev);
643
 
644
		if (temp < rdev->pm.dpm.thermal.min_temp)
645
			/* switch back the user state */
646
			dpm_state = rdev->pm.dpm.user_state;
647
	} else {
648
		if (rdev->pm.dpm.thermal.high_to_low)
649
			/* switch back the user state */
650
			dpm_state = rdev->pm.dpm.user_state;
651
	}
652
	mutex_lock(&rdev->pm.mutex);
653
	if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
654
		rdev->pm.dpm.thermal_active = true;
655
	else
656
		rdev->pm.dpm.thermal_active = false;
657
	rdev->pm.dpm.state = dpm_state;
658
	mutex_unlock(&rdev->pm.mutex);
659
 
660
	radeon_pm_compute_clocks(rdev);
1430 serge 661
}
662
 
6104 serge 663
static bool radeon_dpm_single_display(struct radeon_device *rdev)
1963 serge 664
{
5078 serge 665
	bool single_display = (rdev->pm.dpm.new_active_crtc_count < 2) ?
666
		true : false;
667
 
668
	/* check if the vblank period is too short to adjust the mclk */
669
	if (single_display && rdev->asic->dpm.vblank_too_short) {
670
		if (radeon_dpm_vblank_too_short(rdev))
671
			single_display = false;
672
	}
673
 
6104 serge 674
	/* 120hz tends to be problematic even if they are under the
675
	 * vblank limit.
676
	 */
677
	if (single_display && (r600_dpm_get_vrefresh(rdev) >= 120))
678
		single_display = false;
679
 
680
	return single_display;
681
}
682
 
683
/**
 * radeon_dpm_pick_power_state - choose the best dpm power state
 * @rdev: radeon device
 * @dpm_state: requested power state type
 *
 * Scans the parsed power state table for a state matching @dpm_state,
 * honouring single-display-only restrictions.  If no state of the
 * requested type exists, the fallback switch at the bottom degrades the
 * request step by step (e.g. UVD-SD -> UVD-HD -> performance) and
 * restarts the search.  Returns NULL only when even the fallbacks fail.
 */
static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
						     enum radeon_pm_state_type dpm_state)
{
	int i;
	struct radeon_ps *ps;
	u32 ui_class;
	bool single_display = radeon_dpm_single_display(rdev);

	/* certain older asics have a separare 3D performance state,
	 * so try that first if the user selected performance
	 */
	if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
		dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
	/* balanced states don't exist at the moment */
	if (dpm_state == POWER_STATE_TYPE_BALANCED)
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;

restart_search:
	/* Pick the best power state based on current conditions */
	for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
		ps = &rdev->pm.dpm.ps[i];
		ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
		switch (dpm_state) {
		/* user states */
		case POWER_STATE_TYPE_BATTERY:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_BALANCED:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_PERFORMANCE:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		/* internal states */
		case POWER_STATE_TYPE_INTERNAL_UVD:
			/* prefer the dedicated UVD state when one was found */
			if (rdev->pm.dpm.uvd_ps)
				return rdev->pm.dpm.uvd_ps;
			else
				break;
		case POWER_STATE_TYPE_INTERNAL_UVD_SD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_BOOT:
			return rdev->pm.dpm.boot_ps;
		case POWER_STATE_TYPE_INTERNAL_THERMAL:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ACPI:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ULV:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_3DPERF:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
				return ps;
			break;
		default:
			break;
		}
	}
	/* use a fallback state if we didn't match */
	switch (dpm_state) {
	case POWER_STATE_TYPE_INTERNAL_UVD_SD:
		dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_UVD_HD:
	case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
	case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
		if (rdev->pm.dpm.uvd_ps) {
			return rdev->pm.dpm.uvd_ps;
		} else {
			dpm_state = POWER_STATE_TYPE_PERFORMANCE;
			goto restart_search;
		}
	case POWER_STATE_TYPE_INTERNAL_THERMAL:
		dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_ACPI:
		dpm_state = POWER_STATE_TYPE_BATTERY;
		goto restart_search;
	case POWER_STATE_TYPE_BATTERY:
	case POWER_STATE_TYPE_BALANCED:
	case POWER_STATE_TYPE_INTERNAL_3DPERF:
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;
		goto restart_search;
	default:
		break;
	}

	return NULL;
}
809
 
5078 serge 810
/**
 * radeon_dpm_change_power_state_locked - transition to the best dpm state
 * @rdev: radeon device
 *
 * Picks the power state matching the current (possibly overridden)
 * state type and programs it.  When the picked state equals the current
 * one, the reprogram is skipped unless vce activity or the display
 * configuration changed; pre-BTC/APU parts and BTC+ parts use different
 * "just update the display config" shortcuts.  Lock order for the
 * actual switch is mclk_lock then ring_lock, released in reverse.
 * Caller must hold rdev->pm.mutex.
 */
static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
{
	int i;
	struct radeon_ps *ps;
	enum radeon_pm_state_type dpm_state;
	int ret;
	bool single_display = radeon_dpm_single_display(rdev);

	/* if dpm init failed */
	if (!rdev->pm.dpm_enabled)
		return;

	if (rdev->pm.dpm.user_state != rdev->pm.dpm.state) {
		/* add other state override checks here */
		if ((!rdev->pm.dpm.thermal_active) &&
		    (!rdev->pm.dpm.uvd_active))
			rdev->pm.dpm.state = rdev->pm.dpm.user_state;
	}
	dpm_state = rdev->pm.dpm.state;

	ps = radeon_dpm_pick_power_state(rdev, dpm_state);
	if (ps)
		rdev->pm.dpm.requested_ps = ps;
	else
		return;

	/* no need to reprogram if nothing changed unless we are on BTC+ */
	if (rdev->pm.dpm.current_ps == rdev->pm.dpm.requested_ps) {
		/* vce just modifies an existing state so force a change */
		if (ps->vce_active != rdev->pm.dpm.vce_active)
			goto force;
		/* user has made a display change (such as timing) */
		if (rdev->pm.dpm.single_display != single_display)
			goto force;
		if ((rdev->family < CHIP_BARTS) || (rdev->flags & RADEON_IS_IGP)) {
			/* for pre-BTC and APUs if the num crtcs changed but state is the same,
			 * all we need to do is update the display configuration.
			 */
			if (rdev->pm.dpm.new_active_crtcs != rdev->pm.dpm.current_active_crtcs) {
				/* update display watermarks based on new power state */
				radeon_bandwidth_update(rdev);
				/* update displays */
				radeon_dpm_display_configuration_changed(rdev);
				rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
				rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
			}
			return;
		} else {
			/* for BTC+ if the num crtcs hasn't changed and state is the same,
			 * nothing to do, if the num crtcs is > 1 and state is the same,
			 * update display configuration.
			 */
			if (rdev->pm.dpm.new_active_crtcs ==
			    rdev->pm.dpm.current_active_crtcs) {
				return;
			} else {
				if ((rdev->pm.dpm.current_active_crtc_count > 1) &&
				    (rdev->pm.dpm.new_active_crtc_count > 1)) {
					/* update display watermarks based on new power state */
					radeon_bandwidth_update(rdev);
					/* update displays */
					radeon_dpm_display_configuration_changed(rdev);
					rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
					rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
					return;
				}
			}
		}
	}

force:
	if (radeon_dpm == 1) {
		printk("switching from power state:\n");
		radeon_dpm_print_power_state(rdev, rdev->pm.dpm.current_ps);
		printk("switching to power state:\n");
		radeon_dpm_print_power_state(rdev, rdev->pm.dpm.requested_ps);
	}

	down_write(&rdev->pm.mclk_lock);
	mutex_lock(&rdev->ring_lock);

	/* update whether vce is active */
	ps->vce_active = rdev->pm.dpm.vce_active;

	ret = radeon_dpm_pre_set_power_state(rdev);
	if (ret)
		goto done;

	/* update display watermarks based on new power state */
	radeon_bandwidth_update(rdev);
	/* update displays */
	radeon_dpm_display_configuration_changed(rdev);

	rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
	rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
	rdev->pm.dpm.single_display = single_display;

	/* wait for the rings to drain */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		struct radeon_ring *ring = &rdev->ring[i];
		if (ring->ready)
			radeon_fence_wait_empty(rdev, i);
	}

	/* program the new power state */
	radeon_dpm_set_power_state(rdev);

	/* update current power state */
	rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps;

	radeon_dpm_post_set_power_state(rdev);

	if (rdev->asic->dpm.force_performance_level) {
		if (rdev->pm.dpm.thermal_active) {
			enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
			/* force low perf level for thermal */
			radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_LOW);
			/* save the user's level */
			rdev->pm.dpm.forced_level = level;
		} else {
			/* otherwise, user selected level */
			radeon_dpm_force_performance_level(rdev, rdev->pm.dpm.forced_level);
		}
	}

done:
	mutex_unlock(&rdev->ring_lock);
	up_write(&rdev->pm.mclk_lock);
}
939
 
940
/**
 * radeon_dpm_enable_uvd - notify dpm that UVD (video decode) activity changed
 * @rdev: radeon device
 * @enable: true if a UVD stream is becoming active
 *
 * If the asic supports UVD powergating, gate/ungate the block directly
 * (kept ungated while any SD/HD streams are still counted, even if paused).
 * Otherwise record the activity in the dpm state and recompute clocks so a
 * UVD-capable power state can be selected.
 */
void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable)
{
	enum radeon_pm_state_type dpm_state;

	if (rdev->asic->dpm.powergate_uvd) {
		mutex_lock(&rdev->pm.mutex);
		/* don't powergate anything if we
		   have active but pause streams */
		enable |= rdev->pm.dpm.sd > 0;
		enable |= rdev->pm.dpm.hd > 0;
		/* enable/disable UVD */
		radeon_dpm_powergate_uvd(rdev, !enable);
		mutex_unlock(&rdev->pm.mutex);
	} else {
		if (enable) {
			mutex_lock(&rdev->pm.mutex);
			rdev->pm.dpm.uvd_active = true;
			/* disable this for now: stream-count based state
			 * selection is kept but compiled out, so we always
			 * fall through to the generic UVD state */
#if 0
			if ((rdev->pm.dpm.sd == 1) && (rdev->pm.dpm.hd == 0))
				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_SD;
			else if ((rdev->pm.dpm.sd == 2) && (rdev->pm.dpm.hd == 0))
				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
			else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 1))
				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
			else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 2))
				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD2;
			else
#endif
				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD;
			rdev->pm.dpm.state = dpm_state;
			mutex_unlock(&rdev->pm.mutex);
		} else {
			mutex_lock(&rdev->pm.mutex);
			rdev->pm.dpm.uvd_active = false;
			mutex_unlock(&rdev->pm.mutex);
		}

		/* pick a power state matching the new UVD activity */
		radeon_pm_compute_clocks(rdev);
	}
}
981
 
982
/**
 * radeon_dpm_enable_vce - notify dpm that VCE (video encode) activity changed
 * @rdev: radeon device
 * @enable: true if a VCE task is becoming active
 *
 * Updates the VCE activity flag under the pm mutex and recomputes clocks
 * so the new state takes effect.
 */
void radeon_dpm_enable_vce(struct radeon_device *rdev, bool enable)
{
	mutex_lock(&rdev->pm.mutex);
	rdev->pm.dpm.vce_active = enable;
	if (enable) {
		/* XXX select vce level based on ring/task */
		rdev->pm.dpm.vce_level = RADEON_VCE_LEVEL_AC_ALL;
	}
	mutex_unlock(&rdev->pm.mutex);

	radeon_pm_compute_clocks(rdev);
}
998
 
999
static void radeon_pm_suspend_old(struct radeon_device *rdev)
1000
{
1963 serge 1001
	mutex_lock(&rdev->pm.mutex);
1002
	if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
1003
		if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE)
1004
			rdev->pm.dynpm_state = DYNPM_STATE_SUSPENDED;
1005
	}
1006
	mutex_unlock(&rdev->pm.mutex);
1007
 
1008
}
1009
 
5078 serge 1010
static void radeon_pm_suspend_dpm(struct radeon_device *rdev)
1963 serge 1011
{
5078 serge 1012
	mutex_lock(&rdev->pm.mutex);
1013
	/* disable dpm */
1014
	radeon_dpm_disable(rdev);
1015
	/* reset the power state */
1016
	rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
1017
	rdev->pm.dpm_enabled = false;
1018
	mutex_unlock(&rdev->pm.mutex);
1019
}
1020
 
1021
void radeon_pm_suspend(struct radeon_device *rdev)
1022
{
1023
	if (rdev->pm.pm_method == PM_METHOD_DPM)
1024
		radeon_pm_suspend_dpm(rdev);
1025
	else
1026
		radeon_pm_suspend_old(rdev);
1027
}
1028
 
1029
/* Resume handler for the legacy (profile/dynpm) pm path.
 *
 * Restores default voltages/clocks (only attempted on BARTS..CAYMAN when the
 * MC ucode is present), re-seeds the cached current_* pm bookkeeping that
 * asic init reset, restarts a suspended dynpm state machine (the idle work
 * scheduling is commented out in this port), and finally recomputes clocks.
 */
static void radeon_pm_resume_old(struct radeon_device *rdev)
{
	/* set up the default clocks if the MC ucode is loaded */
	if ((rdev->family >= CHIP_BARTS) &&
	    (rdev->family <= CHIP_CAYMAN) &&
	    rdev->mc_fw) {
		if (rdev->pm.default_vddc)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
						SET_VOLTAGE_TYPE_ASIC_VDDC);
		if (rdev->pm.default_vddci)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
						SET_VOLTAGE_TYPE_ASIC_VDDCI);
		if (rdev->pm.default_sclk)
			radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
		if (rdev->pm.default_mclk)
			radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
	}
	/* asic init will reset the default power state */
	mutex_lock(&rdev->pm.mutex);
	rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
	rdev->pm.current_clock_mode_index = 0;
	rdev->pm.current_sclk = rdev->pm.default_sclk;
	rdev->pm.current_mclk = rdev->pm.default_mclk;
	/* power_state may be NULL if the bios parse failed */
	if (rdev->pm.power_state) {
		rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
		rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci;
	}
	if (rdev->pm.pm_method == PM_METHOD_DYNPM
	    && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) {
		rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
		/* idle work not used in this port */
//		schedule_delayed_work(&rdev->pm.dynpm_idle_work,
//					msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
	}
	mutex_unlock(&rdev->pm.mutex);
	radeon_pm_compute_clocks(rdev);
}
1065
 
5078 serge 1066
/* Resume handler for the dpm path.
 *
 * Re-enables dpm from the boot state; on failure, logs an error and falls
 * back to programming the default voltages/clocks directly (BARTS..CAYMAN
 * with MC ucode only), leaving dpm_enabled false.
 */
static void radeon_pm_resume_dpm(struct radeon_device *rdev)
{
	int ret;

	/* asic init will reset to the boot state */
	mutex_lock(&rdev->pm.mutex);
	rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
	radeon_dpm_setup_asic(rdev);
	ret = radeon_dpm_enable(rdev);
	mutex_unlock(&rdev->pm.mutex);
	if (ret)
		goto dpm_resume_fail;
	rdev->pm.dpm_enabled = true;
	return;

dpm_resume_fail:
	DRM_ERROR("radeon: dpm resume failed\n");
	/* best effort: restore sane default clocks/voltages without dpm */
	if ((rdev->family >= CHIP_BARTS) &&
	    (rdev->family <= CHIP_CAYMAN) &&
	    rdev->mc_fw) {
		if (rdev->pm.default_vddc)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
						SET_VOLTAGE_TYPE_ASIC_VDDC);
		if (rdev->pm.default_vddci)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
						SET_VOLTAGE_TYPE_ASIC_VDDCI);
		if (rdev->pm.default_sclk)
			radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
		if (rdev->pm.default_mclk)
			radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
	}
}
1098
 
1099
void radeon_pm_resume(struct radeon_device *rdev)
1100
{
1101
	if (rdev->pm.pm_method == PM_METHOD_DPM)
1102
		radeon_pm_resume_dpm(rdev);
1103
	else
1104
		radeon_pm_resume_old(rdev);
1105
}
1106
 
1107
/* Initialize the legacy (profile/dynpm) pm path.
 *
 * Seeds default pm bookkeeping, parses power states from the video bios
 * (atom or combios), builds the profile table, optionally programs default
 * voltages/clocks (BARTS..CAYMAN with MC ucode), and sets up the internal
 * thermal sensor. The dynpm idle worker is not used in this port.
 *
 * Returns 0 on success or a negative error code from radeon_hwmon_init().
 */
static int radeon_pm_init_old(struct radeon_device *rdev)
{
	int ret;

	rdev->pm.profile = PM_PROFILE_DEFAULT;
	rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
	rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
	rdev->pm.dynpm_can_upclock = true;
	rdev->pm.dynpm_can_downclock = true;
	rdev->pm.default_sclk = rdev->clock.default_sclk;
	rdev->pm.default_mclk = rdev->clock.default_mclk;
	rdev->pm.current_sclk = rdev->clock.default_sclk;
	rdev->pm.current_mclk = rdev->clock.default_mclk;
	rdev->pm.int_thermal_type = THERMAL_TYPE_NONE;

	if (rdev->bios) {
		if (rdev->is_atom_bios)
			radeon_atombios_get_power_modes(rdev);
		else
			radeon_combios_get_power_modes(rdev);
		radeon_pm_print_states(rdev);
		radeon_pm_init_profile(rdev);
		/* set up the default clocks if the MC ucode is loaded */
		if ((rdev->family >= CHIP_BARTS) &&
		    (rdev->family <= CHIP_CAYMAN) &&
		    rdev->mc_fw) {
			if (rdev->pm.default_vddc)
				radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
							SET_VOLTAGE_TYPE_ASIC_VDDC);
			if (rdev->pm.default_vddci)
				radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
							SET_VOLTAGE_TYPE_ASIC_VDDCI);
			if (rdev->pm.default_sclk)
				radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
			if (rdev->pm.default_mclk)
				radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
		}
	}

	/* set up the internal thermal sensor if applicable */
	ret = radeon_hwmon_init(rdev);
	if (ret)
		return ret;

	/* idle work not used in this port */
//	INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler);

	if (rdev->pm.num_power_states > 1) {
		/* where's the best place to put these? */

		DRM_INFO("radeon: power management initialized\n");
	}

	return 0;
}
1162
 
5078 serge 1163
static void radeon_dpm_print_power_states(struct radeon_device *rdev)
1963 serge 1164
{
5078 serge 1165
	int i;
1166
 
1167
	for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
1168
		printk("== power state %d ==\n", i);
1169
		radeon_dpm_print_power_state(rdev, &rdev->pm.dpm.ps[i]);
1170
	}
1171
}
1172
 
1173
/* Initialize the dpm pm path.
 *
 * Seeds balanced defaults, parses power states from the atom bios (dpm
 * requires an atom bios; -EINVAL otherwise), sets up the thermal sensor and
 * worker, then initializes and enables dpm under the pm mutex. On enable
 * failure, falls back to programming default clocks/voltages directly
 * (BARTS..CAYMAN with MC ucode) and returns the error.
 */
static int radeon_pm_init_dpm(struct radeon_device *rdev)
{
	int ret;

	/* default to balanced state */
	rdev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
	rdev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
	rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
	rdev->pm.default_sclk = rdev->clock.default_sclk;
	rdev->pm.default_mclk = rdev->clock.default_mclk;
	rdev->pm.current_sclk = rdev->clock.default_sclk;
	rdev->pm.current_mclk = rdev->clock.default_mclk;
	rdev->pm.int_thermal_type = THERMAL_TYPE_NONE;

	/* dpm is atom-bios only */
	if (rdev->bios && rdev->is_atom_bios)
		radeon_atombios_get_power_modes(rdev);
	else
		return -EINVAL;

	/* set up the internal thermal sensor if applicable */
	ret = radeon_hwmon_init(rdev);
	if (ret)
		return ret;

	INIT_WORK(&rdev->pm.dpm.thermal.work, radeon_dpm_thermal_work_handler);
	mutex_lock(&rdev->pm.mutex);
	radeon_dpm_init(rdev);
	/* start from the boot state */
	rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
	if (radeon_dpm == 1)
		radeon_dpm_print_power_states(rdev);
	radeon_dpm_setup_asic(rdev);
	ret = radeon_dpm_enable(rdev);
	mutex_unlock(&rdev->pm.mutex);
	if (ret)
		goto dpm_failed;
	rdev->pm.dpm_enabled = true;

	DRM_INFO("radeon: dpm initialized\n");

	return 0;

dpm_failed:
	rdev->pm.dpm_enabled = false;
	/* best effort: restore sane default clocks/voltages without dpm */
	if ((rdev->family >= CHIP_BARTS) &&
	    (rdev->family <= CHIP_CAYMAN) &&
	    rdev->mc_fw) {
		if (rdev->pm.default_vddc)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
						SET_VOLTAGE_TYPE_ASIC_VDDC);
		if (rdev->pm.default_vddci)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
						SET_VOLTAGE_TYPE_ASIC_VDDCI);
		if (rdev->pm.default_sclk)
			radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
		if (rdev->pm.default_mclk)
			radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
	}
	DRM_ERROR("radeon: dpm initialization failed\n");
	return ret;
}
1233
 
6104 serge 1234
/* PCI identity of a board that needs dpm disabled (see radeon_dpm_quirk_list). */
struct radeon_dpm_quirk {
	u32 chip_vendor;	/* PCI vendor ID of the GPU */
	u32 chip_device;	/* PCI device ID of the GPU */
	u32 subsys_vendor;	/* board subsystem vendor ID */
	u32 subsys_device;	/* board subsystem device ID */
};
1240
 
1241
/* cards with dpm stability problems; terminated by an all-zero sentinel */
static struct radeon_dpm_quirk radeon_dpm_quirk_list[] = {
	/* TURKS - https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1386534 */
	{ PCI_VENDOR_ID_ATI, 0x6759, 0x1682, 0x3195 },
	/* TURKS - https://bugzilla.kernel.org/show_bug.cgi?id=83731 */
	{ PCI_VENDOR_ID_ATI, 0x6840, 0x1179, 0xfb81 },
	{ 0, 0, 0, 0 },
};
1249
 
5078 serge 1250
/* Top-level pm init: choose dpm vs the legacy profile method per asic
 * family, firmware availability, quirks and the radeon_dpm module option,
 * then hand off to the matching init routine.
 *
 * Returns 0 on success or a negative error code from the chosen init path.
 */
int radeon_pm_init(struct radeon_device *rdev)
{
	struct radeon_dpm_quirk *p = radeon_dpm_quirk_list;
	bool disable_dpm = false;

	/* Apply dpm quirks: match this board against the blacklist */
	while (p && p->chip_device != 0) {
		if (rdev->pdev->vendor == p->chip_vendor &&
		    rdev->pdev->device == p->chip_device &&
		    rdev->pdev->subsystem_vendor == p->subsys_vendor &&
		    rdev->pdev->subsystem_device == p->subsys_device) {
			disable_dpm = true;
			break;
		}
		++p;
	}

	/* enable dpm on rv6xx+ */
	switch (rdev->family) {
	/* first group: dpm is opt-in (radeon_dpm=1 required) */
	case CHIP_RV610:
	case CHIP_RV630:
	case CHIP_RV620:
	case CHIP_RV635:
	case CHIP_RV670:
	case CHIP_RS780:
	case CHIP_RS880:
	case CHIP_RV770:
		/* DPM requires the RLC, RV770+ dGPU requires SMC */
		if (!rdev->rlc_fw)
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		else if ((rdev->family >= CHIP_RV770) &&
			 (!(rdev->flags & RADEON_IS_IGP)) &&
			 (!rdev->smc_fw))
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		else if (radeon_dpm == 1)
			rdev->pm.pm_method = PM_METHOD_DPM;
		else
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		break;
	/* second group: dpm is the default (opt-out via radeon_dpm=0 or quirk) */
	case CHIP_RV730:
	case CHIP_RV710:
	case CHIP_RV740:
	case CHIP_CEDAR:
	case CHIP_REDWOOD:
	case CHIP_JUNIPER:
	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
	case CHIP_PALM:
	case CHIP_SUMO:
	case CHIP_SUMO2:
	case CHIP_BARTS:
	case CHIP_TURKS:
	case CHIP_CAICOS:
	case CHIP_CAYMAN:
	case CHIP_ARUBA:
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
	case CHIP_HAINAN:
	case CHIP_BONAIRE:
	case CHIP_KABINI:
	case CHIP_KAVERI:
	case CHIP_HAWAII:
	case CHIP_MULLINS:
		/* DPM requires the RLC, RV770+ dGPU requires SMC */
		if (!rdev->rlc_fw)
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		else if ((rdev->family >= CHIP_RV770) &&
			 (!(rdev->flags & RADEON_IS_IGP)) &&
			 (!rdev->smc_fw))
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		else if (disable_dpm && (radeon_dpm == -1))
			/* quirked board and user left it on auto */
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		else if (radeon_dpm == 0)
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		else
			rdev->pm.pm_method = PM_METHOD_DPM;
		break;
	default:
		/* default to profile method */
		rdev->pm.pm_method = PM_METHOD_PROFILE;
		break;
	}

	if (rdev->pm.pm_method == PM_METHOD_DPM)
		return radeon_pm_init_dpm(rdev);
	else
		return radeon_pm_init_old(rdev);
}
1340
 
1341
int radeon_pm_late_init(struct radeon_device *rdev)
1342
{
1343
	int ret = 0;
1344
 
1345
	if (rdev->pm.pm_method == PM_METHOD_DPM) {
1346
		mutex_lock(&rdev->pm.mutex);
1347
		ret = radeon_dpm_late_enable(rdev);
1348
		mutex_unlock(&rdev->pm.mutex);
1349
	}
1350
	return ret;
1351
}
1352
 
1353
static void radeon_pm_fini_old(struct radeon_device *rdev)
1354
{
1963 serge 1355
	if (rdev->pm.num_power_states > 1) {
1356
		mutex_lock(&rdev->pm.mutex);
1357
		if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
1358
			rdev->pm.profile = PM_PROFILE_DEFAULT;
1359
			radeon_pm_update_profile(rdev);
1360
			radeon_pm_set_clocks(rdev);
1361
		} else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
1362
			/* reset default clocks */
1363
			rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
1364
			rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
1365
			radeon_pm_set_clocks(rdev);
1366
		}
1367
		mutex_unlock(&rdev->pm.mutex);
1368
 
1369
//		cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
1370
 
5078 serge 1371
   }
1963 serge 1372
 
1373
	radeon_hwmon_fini(rdev);
6104 serge 1374
	kfree(rdev->pm.power_state);
1963 serge 1375
}
1376
 
5078 serge 1377
static void radeon_pm_fini_dpm(struct radeon_device *rdev)
1430 serge 1378
{
5078 serge 1379
	if (rdev->pm.num_power_states > 1) {
1380
		mutex_lock(&rdev->pm.mutex);
1381
		radeon_dpm_disable(rdev);
1382
		mutex_unlock(&rdev->pm.mutex);
1383
	}
1384
	radeon_dpm_fini(rdev);
1385
 
1386
	radeon_hwmon_fini(rdev);
1387
	kfree(rdev->pm.power_state);
1388
}
1389
 
1390
void radeon_pm_fini(struct radeon_device *rdev)
1391
{
1392
	if (rdev->pm.pm_method == PM_METHOD_DPM)
1393
		radeon_pm_fini_dpm(rdev);
1394
	else
1395
		radeon_pm_fini_old(rdev);
1396
}
1397
 
1398
/* Legacy-path clock recomputation, called when display configuration or
 * activity changes.
 *
 * Recounts the enabled crtcs, then either re-applies the current profile
 * (PM_METHOD_PROFILE) or drives the dynpm state machine (PM_METHOD_DYNPM):
 * multiple heads pause dynpm at default clocks, a single head resumes it,
 * and zero heads drop to minimum clocks. Idle-work scheduling/cancelling
 * is commented out in this port.
 */
static void radeon_pm_compute_clocks_old(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;

	if (rdev->pm.num_power_states < 2)
		return;

	mutex_lock(&rdev->pm.mutex);

	/* recount enabled crtcs */
	rdev->pm.active_crtcs = 0;
	rdev->pm.active_crtc_count = 0;
	if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc,
				    &ddev->mode_config.crtc_list, head) {
			radeon_crtc = to_radeon_crtc(crtc);
			if (radeon_crtc->enabled) {
				rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
				rdev->pm.active_crtc_count++;
			}
		}
	}

	if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
		radeon_pm_update_profile(rdev);
		radeon_pm_set_clocks(rdev);
	} else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
		if (rdev->pm.dynpm_state != DYNPM_STATE_DISABLED) {
			if (rdev->pm.active_crtc_count > 1) {
				/* multi-head: park dynpm at default clocks */
				if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
//                   cancel_delayed_work(&rdev->pm.dynpm_idle_work);

					rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
					radeon_pm_get_dynpm_state(rdev);
					radeon_pm_set_clocks(rdev);

					DRM_DEBUG_DRIVER("radeon: dynamic power management deactivated\n");
				}
			} else if (rdev->pm.active_crtc_count == 1) {
				/* TODO: Increase clocks if needed for current mode */

				if (rdev->pm.dynpm_state == DYNPM_STATE_MINIMUM) {
					/* coming back from the headless minimum: upclock */
					rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_UPCLOCK;
					radeon_pm_get_dynpm_state(rdev);
					radeon_pm_set_clocks(rdev);

//					schedule_delayed_work(&rdev->pm.dynpm_idle_work,
//							   msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
				} else if (rdev->pm.dynpm_state == DYNPM_STATE_PAUSED) {
					rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
//					schedule_delayed_work(&rdev->pm.dynpm_idle_work,
//							   msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
					DRM_DEBUG_DRIVER("radeon: dynamic power management activated\n");
				}
			} else { /* count == 0 */
				/* no heads: drop to minimum clocks */
				if (rdev->pm.dynpm_state != DYNPM_STATE_MINIMUM) {
//					cancel_delayed_work(&rdev->pm.dynpm_idle_work);

					rdev->pm.dynpm_state = DYNPM_STATE_MINIMUM;
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_MINIMUM;
					radeon_pm_get_dynpm_state(rdev);
					radeon_pm_set_clocks(rdev);
				}
			}
		}
	}

	mutex_unlock(&rdev->pm.mutex);
}
1470
 
5078 serge 1471
/* dpm-path clock recomputation.
 *
 * Refreshes the pending crtc mask/count and the AC/battery status, then
 * asks dpm to pick and program a matching power state. No-op until dpm
 * has been successfully enabled.
 */
static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;

	if (!rdev->pm.dpm_enabled)
		return;

	mutex_lock(&rdev->pm.mutex);

	/* update active crtc counts */
	rdev->pm.dpm.new_active_crtcs = 0;
	rdev->pm.dpm.new_active_crtc_count = 0;
	if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc,
				    &ddev->mode_config.crtc_list, head) {
			radeon_crtc = to_radeon_crtc(crtc);
			if (crtc->enabled) {
				rdev->pm.dpm.new_active_crtcs |= (1 << radeon_crtc->crtc_id);
				rdev->pm.dpm.new_active_crtc_count++;
			}
		}
	}

	/* update battery/ac status */
	if (power_supply_is_system_supplied() > 0)
		rdev->pm.dpm.ac_power = true;
	else
		rdev->pm.dpm.ac_power = false;

	/* select and program the new power state */
	radeon_dpm_change_power_state_locked(rdev);

	mutex_unlock(&rdev->pm.mutex);

}
1507
 
1508
void radeon_pm_compute_clocks(struct radeon_device *rdev)
1509
{
1510
	if (rdev->pm.pm_method == PM_METHOD_DPM)
1511
		radeon_pm_compute_clocks_dpm(rdev);
1512
	else
1513
		radeon_pm_compute_clocks_old(rdev);
1514
}
1515
 
1963 serge 1516
/* Return true only if every active crtc is currently inside its vertical
 * blanking interval (the safe window for reclocking). A crtc whose
 * scanout position reads as valid and outside vblank makes this false. */
static bool radeon_pm_in_vbl(struct radeon_device *rdev)
{
	int  crtc, vpos, hpos, vbl_status;
	bool in_vbl = true;

	/* Iterate over all active crtc's. All crtc's must be in vblank,
	 * otherwise return in_vbl == false.
	 */
	for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) {
		if (rdev->pm.active_crtcs & (1 << crtc)) {
			vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev,
								crtc,
								USE_REAL_VBLANKSTART,
								&vpos, &hpos, NULL, NULL,
								&rdev->mode_info.crtcs[crtc]->base.hwmode);
			if ((vbl_status & DRM_SCANOUTPOS_VALID) &&
			    !(vbl_status & DRM_SCANOUTPOS_IN_VBLANK))
				in_vbl = false;
		}
	}

	return in_vbl;
}
1539
 
1963 serge 1540
/* Debug wrapper around radeon_pm_in_vbl(): logs when a pm change happens
 * outside vblank, tagging the message with entry/exit of the change. */
static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish)
{
	u32 stat_crtc = 0;
	bool in_vbl = radeon_pm_in_vbl(rdev);

	if (!in_vbl)
		DRM_DEBUG_DRIVER("not in vbl for pm change %08x at %s\n", stat_crtc,
			 finish ? "exit" : "entry");

	return in_vbl;
}
1550
 
1551
 
1268 serge 1552
/*
1553
 * Debugfs info
1554
 */
1555
#if defined(CONFIG_DEBUG_FS)
1556
 
1557
/* debugfs show callback: print the current pm state.
 *
 * A powered-off PX asic gets a short notice; with dpm enabled the asic's
 * own performance-level dump is used (under the pm mutex); otherwise the
 * cached/legacy clocks and voltage are printed. Always returns 0. */
static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct drm_device *ddev = rdev->ddev;

	if  ((rdev->flags & RADEON_IS_PX) &&
	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
		seq_printf(m, "PX asic powered off\n");
	} else if (rdev->pm.dpm_enabled) {
		mutex_lock(&rdev->pm.mutex);
		if (rdev->asic->dpm.debugfs_print_current_performance_level)
			radeon_dpm_debugfs_print_current_performance_level(rdev, m);
		else
			seq_printf(m, "Debugfs support not implemented for this asic\n");
		mutex_unlock(&rdev->pm.mutex);
	} else {
		seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk);
		/* radeon_get_engine_clock is not reliable on APUs so just print the current clock */
		if ((rdev->family >= CHIP_PALM) && (rdev->flags & RADEON_IS_IGP))
			seq_printf(m, "current engine clock: %u0 kHz\n", rdev->pm.current_sclk);
		else
			seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
		seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk);
		if (rdev->asic->pm.get_memory_clock)
			seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
		if (rdev->pm.current_vddc)
			seq_printf(m, "voltage: %u mV\n", rdev->pm.current_vddc);
		if (rdev->asic->pm.get_pcie_lanes)
			seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev));
	}

	return 0;
}
1592
 
1593
/* debugfs entries registered by radeon_debugfs_pm_init() */
static struct drm_info_list radeon_pm_info_list[] = {
	{"radeon_pm_info", radeon_debugfs_pm_info, 0, NULL},
};
1596
#endif
1597
 
1430 serge 1598
/* Register the pm debugfs files; a no-op (returning 0) when debugfs
 * support is compiled out. */
static int radeon_debugfs_pm_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_pm_info_list, ARRAY_SIZE(radeon_pm_info_list));
#else
	return 0;
#endif
}