Subversion Repositories Kolibri OS

Rev

Rev 6938 | Details | Compare with Previous | Last modification | View Log | RSS feed

Rev Author Line No. Line
1268 serge 1
/*
2
 * Permission is hereby granted, free of charge, to any person obtaining a
3
 * copy of this software and associated documentation files (the "Software"),
4
 * to deal in the Software without restriction, including without limitation
5
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
6
 * and/or sell copies of the Software, and to permit persons to whom the
7
 * Software is furnished to do so, subject to the following conditions:
8
 *
9
 * The above copyright notice and this permission notice shall be included in
10
 * all copies or substantial portions of the Software.
11
 *
12
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
13
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
15
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
16
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
17
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
18
 * OTHER DEALINGS IN THE SOFTWARE.
19
 *
20
 * Authors: Rafał Miłecki 
1430 serge 21
 *          Alex Deucher 
1268 serge 22
 */
2997 Serge 23
#include <drm/drmP.h>
1268 serge 24
#include "radeon.h"
1430 serge 25
#include "avivod.h"
1986 serge 26
#include "atom.h"
6104 serge 27
#include "r600_dpm.h"
1268 serge 28
 
1430 serge 29
#define RADEON_IDLE_LOOP_MS 100
30
#define RADEON_RECLOCK_DELAY_MS 200
31
#define RADEON_WAIT_VBLANK_TIMEOUT 200
1268 serge 32
 
1963 serge 33
/* Human-readable names for the legacy power-state types, indexed by
 * power_state->type; index 0 (default/none) deliberately prints as an
 * empty string in the debug output. */
static const char *radeon_pm_state_type_name[5] = {
	"",
	"Powersave",
	"Battery",
	"Balanced",
	"Performance",
};
40
 
1963 serge 41
static void radeon_dynpm_idle_work_handler(struct work_struct *work);
42
static int radeon_debugfs_pm_init(struct radeon_device *rdev);
43
static bool radeon_pm_in_vbl(struct radeon_device *rdev);
44
static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish);
45
static void radeon_pm_update_profile(struct radeon_device *rdev);
46
static void radeon_pm_set_clocks(struct radeon_device *rdev);
47
 
2997 Serge 48
int radeon_pm_get_type_index(struct radeon_device *rdev,
49
			     enum radeon_pm_state_type ps_type,
50
			     int instance)
51
{
52
	int i;
53
	int found_instance = -1;
1963 serge 54
 
2997 Serge 55
	for (i = 0; i < rdev->pm.num_power_states; i++) {
56
		if (rdev->pm.power_state[i].type == ps_type) {
57
			found_instance++;
58
			if (found_instance == instance)
59
				return i;
60
		}
61
	}
62
	/* return default if no match */
63
	return rdev->pm.default_power_state_index;
64
}
1963 serge 65
 
2997 Serge 66
/*
 * radeon_pm_acpi_event_handler - react to an AC/DC power-source change.
 *
 * For DPM: caches the new AC-power status (used by the dpm code) and,
 * on ARUBA parts that implement it, toggles BAPM accordingly.  For the
 * legacy profile method: the "auto" profile depends on AC/DC state, so
 * re-evaluate the profile and reprogram the clocks.  All state changes
 * are made under pm.mutex.
 */
void radeon_pm_acpi_event_handler(struct radeon_device *rdev)
{
	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
		mutex_lock(&rdev->pm.mutex);
		if (power_supply_is_system_supplied() > 0)
			rdev->pm.dpm.ac_power = true;
		else
			rdev->pm.dpm.ac_power = false;
		if (rdev->family == CHIP_ARUBA) {
			/* only call into the asic hook if it is implemented */
			if (rdev->asic->dpm.enable_bapm)
				radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power);
		}
		mutex_unlock(&rdev->pm.mutex);
	} else if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
		/* only the "auto" profile cares about the power source */
		if (rdev->pm.profile == PM_PROFILE_AUTO) {
			mutex_lock(&rdev->pm.mutex);
			radeon_pm_update_profile(rdev);
			radeon_pm_set_clocks(rdev);
			mutex_unlock(&rdev->pm.mutex);
		}
	}
}
88
 
1963 serge 89
static void radeon_pm_update_profile(struct radeon_device *rdev)
1430 serge 90
{
1963 serge 91
	switch (rdev->pm.profile) {
92
	case PM_PROFILE_DEFAULT:
93
		rdev->pm.profile_index = PM_PROFILE_DEFAULT_IDX;
1430 serge 94
		break;
1963 serge 95
	case PM_PROFILE_AUTO:
96
		if (power_supply_is_system_supplied() > 0) {
97
			if (rdev->pm.active_crtc_count > 1)
98
				rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
99
			else
100
				rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
1430 serge 101
		} else {
1963 serge 102
			if (rdev->pm.active_crtc_count > 1)
103
				rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
104
			else
105
				rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
1430 serge 106
		}
107
		break;
1963 serge 108
	case PM_PROFILE_LOW:
109
		if (rdev->pm.active_crtc_count > 1)
110
			rdev->pm.profile_index = PM_PROFILE_LOW_MH_IDX;
111
		else
112
			rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX;
1430 serge 113
		break;
1963 serge 114
	case PM_PROFILE_MID:
115
		if (rdev->pm.active_crtc_count > 1)
116
			rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
117
		else
118
			rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
119
		break;
120
	case PM_PROFILE_HIGH:
121
		if (rdev->pm.active_crtc_count > 1)
122
			rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
123
		else
124
			rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
125
		break;
1430 serge 126
	}
127
 
1963 serge 128
	if (rdev->pm.active_crtc_count == 0) {
129
		rdev->pm.requested_power_state_index =
130
			rdev->pm.profiles[rdev->pm.profile_index].dpms_off_ps_idx;
131
		rdev->pm.requested_clock_mode_index =
132
			rdev->pm.profiles[rdev->pm.profile_index].dpms_off_cm_idx;
133
	} else {
134
		rdev->pm.requested_power_state_index =
135
			rdev->pm.profiles[rdev->pm.profile_index].dpms_on_ps_idx;
136
		rdev->pm.requested_clock_mode_index =
137
			rdev->pm.profiles[rdev->pm.profile_index].dpms_on_cm_idx;
6104 serge 138
	}
1963 serge 139
}
140
 
141
static void radeon_unmap_vram_bos(struct radeon_device *rdev)
142
{
143
	struct radeon_bo *bo, *n;
144
 
145
	if (list_empty(&rdev->gem.objects))
146
		return;
147
 
5078 serge 148
	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
149
		if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
150
			ttm_bo_unmap_virtual(&bo->tbo);
151
	}
1963 serge 152
}
153
 
2997 Serge 154
static void radeon_sync_with_vblank(struct radeon_device *rdev)
155
{
156
	if (rdev->pm.active_crtcs) {
157
		rdev->pm.vblank_sync = false;
6104 serge 158
		wait_event_timeout(
159
			rdev->irq.vblank_queue, rdev->pm.vblank_sync,
160
			msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT));
161
	}
2997 Serge 162
}
1963 serge 163
 
164
/*
 * radeon_set_power_state - program the requested engine/memory clocks.
 *
 * Only reprograms the hardware when the requested state or clock mode
 * differs from the current one and the GUI is idle.  The statement
 * order is deliberate: voltage/PCIe-lane changes (radeon_pm_misc) go
 * before raising clocks and after lowering them, and each clock write
 * is bracketed by vblank checks.
 */
static void radeon_set_power_state(struct radeon_device *rdev)
{
	u32 sclk, mclk;
	bool misc_after = false;

	/* nothing to do if we are already in the requested state */
	if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
	    (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
		return;

	if (radeon_gui_idle(rdev)) {
		sclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
			clock_info[rdev->pm.requested_clock_mode_index].sclk;
		/* clamp to the default engine clock */
		if (sclk > rdev->pm.default_sclk)
			sclk = rdev->pm.default_sclk;

		/* starting with BTC, there is one state that is used for both
		 * MH and SH.  Difference is that we always use the high clock index for
		 * mclk and vddci.
		 */
		if ((rdev->pm.pm_method == PM_METHOD_PROFILE) &&
		    (rdev->family >= CHIP_BARTS) &&
		    rdev->pm.active_crtc_count &&
		    ((rdev->pm.profile_index == PM_PROFILE_MID_MH_IDX) ||
		     (rdev->pm.profile_index == PM_PROFILE_LOW_MH_IDX)))
			mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
				clock_info[rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx].mclk;
		else
			mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
				clock_info[rdev->pm.requested_clock_mode_index].mclk;

		/* clamp to the default memory clock */
		if (mclk > rdev->pm.default_mclk)
			mclk = rdev->pm.default_mclk;

		/* upvolt before raising clocks, downvolt after lowering clocks */
		if (sclk < rdev->pm.current_sclk)
			misc_after = true;

		radeon_sync_with_vblank(rdev);

		/* dynpm only reclocks while inside the vertical blank */
		if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
			if (!radeon_pm_in_vbl(rdev))
				return;
		}

		radeon_pm_prepare(rdev);

		if (!misc_after)
			/* voltage, pcie lanes, etc.*/
			radeon_pm_misc(rdev);

		/* set engine clock */
		if (sclk != rdev->pm.current_sclk) {
			radeon_pm_debug_check_in_vbl(rdev, false);
			radeon_set_engine_clock(rdev, sclk);
			radeon_pm_debug_check_in_vbl(rdev, true);
			rdev->pm.current_sclk = sclk;
			DRM_DEBUG_DRIVER("Setting: e: %d\n", sclk);
		}

		/* set memory clock */
		if (rdev->asic->pm.set_memory_clock && (mclk != rdev->pm.current_mclk)) {
			radeon_pm_debug_check_in_vbl(rdev, false);
			radeon_set_memory_clock(rdev, mclk);
			radeon_pm_debug_check_in_vbl(rdev, true);
			rdev->pm.current_mclk = mclk;
			DRM_DEBUG_DRIVER("Setting: m: %d\n", mclk);
		}

		if (misc_after)
			/* voltage, pcie lanes, etc.*/
			radeon_pm_misc(rdev);

		radeon_pm_finish(rdev);

		rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index;
		rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index;
	} else
		DRM_DEBUG_DRIVER("pm: GUI not idle!!!\n");
}
243
 
244
/*
 * radeon_pm_set_clocks - safely transition to the requested power state.
 *
 * Takes mclk_lock (write) then ring_lock, drains all ready rings, drops
 * CPU mappings of VRAM BOs and grabs vblank references on the active
 * CRTCs before calling radeon_set_power_state(); afterwards the vblank
 * references are released and display watermarks are recomputed.  Lock
 * acquisition/release order is significant — do not reorder.
 */
static void radeon_pm_set_clocks(struct radeon_device *rdev)
{
	int i, r;

	/* no need to take locks, etc. if nothing's going to change */
	if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
	    (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
		return;

	down_write(&rdev->pm.mclk_lock);
	mutex_lock(&rdev->ring_lock);

	/* wait for the rings to drain */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		struct radeon_ring *ring = &rdev->ring[i];
		if (!ring->ready) {
			continue;
		}
		r = radeon_fence_wait_empty(rdev, i);
		if (r) {
			/* needs a GPU reset dont reset here */
			mutex_unlock(&rdev->ring_lock);
			up_write(&rdev->pm.mclk_lock);
			return;
		}
	}

	radeon_unmap_vram_bos(rdev);

	/* hold a vblank reference on each active CRTC across the reclock */
	if (rdev->irq.installed) {
		for (i = 0; i < rdev->num_crtc; i++) {
			if (rdev->pm.active_crtcs & (1 << i)) {
				/* This can fail if a modeset is in progress */
				if (drm_vblank_get(rdev->ddev, i) == 0)
					rdev->pm.req_vblank |= (1 << i);
				else
					DRM_DEBUG_DRIVER("crtc %d no vblank, can glitch\n",
							 i);
			}
		}
	}

	radeon_set_power_state(rdev);

	/* release the vblank references taken above */
	if (rdev->irq.installed) {
		for (i = 0; i < rdev->num_crtc; i++) {
			if (rdev->pm.req_vblank & (1 << i)) {
				rdev->pm.req_vblank &= ~(1 << i);
				drm_vblank_put(rdev->ddev, i);
			}
		}
	}

	/* update display watermarks based on new power state */
	radeon_update_bandwidth_info(rdev);
	if (rdev->pm.active_crtc_count)
		radeon_bandwidth_update(rdev);

	rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;

	mutex_unlock(&rdev->ring_lock);
	up_write(&rdev->pm.mclk_lock);
}
307
 
1963 serge 308
static void radeon_pm_print_states(struct radeon_device *rdev)
1430 serge 309
{
1963 serge 310
	int i, j;
311
	struct radeon_power_state *power_state;
312
	struct radeon_pm_clock_info *clock_info;
313
 
314
	DRM_DEBUG_DRIVER("%d Power State(s)\n", rdev->pm.num_power_states);
315
	for (i = 0; i < rdev->pm.num_power_states; i++) {
316
		power_state = &rdev->pm.power_state[i];
317
		DRM_DEBUG_DRIVER("State %d: %s\n", i,
318
			radeon_pm_state_type_name[power_state->type]);
319
		if (i == rdev->pm.default_power_state_index)
320
			DRM_DEBUG_DRIVER("\tDefault");
321
		if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP))
322
			DRM_DEBUG_DRIVER("\t%d PCIE Lanes\n", power_state->pcie_lanes);
323
		if (power_state->flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
324
			DRM_DEBUG_DRIVER("\tSingle display only\n");
325
		DRM_DEBUG_DRIVER("\t%d Clock Mode(s)\n", power_state->num_clock_modes);
326
		for (j = 0; j < power_state->num_clock_modes; j++) {
327
			clock_info = &(power_state->clock_info[j]);
328
			if (rdev->flags & RADEON_IS_IGP)
2997 Serge 329
				DRM_DEBUG_DRIVER("\t\t%d e: %d\n",
6104 serge 330
						 j,
2997 Serge 331
						 clock_info->sclk * 10);
1963 serge 332
			else
2997 Serge 333
				DRM_DEBUG_DRIVER("\t\t%d e: %d\tm: %d\tv: %d\n",
6104 serge 334
						 j,
335
						 clock_info->sclk * 10,
336
						 clock_info->mclk * 10,
2997 Serge 337
						 clock_info->voltage.voltage);
1963 serge 338
		}
1430 serge 339
	}
1963 serge 340
}
1430 serge 341
 
1963 serge 342
static ssize_t radeon_get_pm_profile(struct device *dev,
343
				     struct device_attribute *attr,
344
				     char *buf)
345
{
5078 serge 346
	struct drm_device *ddev = dev_get_drvdata(dev);
2997 Serge 347
	struct radeon_device *rdev = ddev->dev_private;
348
	int cp = rdev->pm.profile;
1963 serge 349
 
2997 Serge 350
	return snprintf(buf, PAGE_SIZE, "%s\n",
351
			(cp == PM_PROFILE_AUTO) ? "auto" :
352
			(cp == PM_PROFILE_LOW) ? "low" :
353
			(cp == PM_PROFILE_MID) ? "mid" :
354
			(cp == PM_PROFILE_HIGH) ? "high" : "default");
1430 serge 355
}
356
 
1963 serge 357
static ssize_t radeon_set_pm_profile(struct device *dev,
358
				     struct device_attribute *attr,
359
				     const char *buf,
360
				     size_t count)
1430 serge 361
{
5078 serge 362
	struct drm_device *ddev = dev_get_drvdata(dev);
1963 serge 363
	struct radeon_device *rdev = ddev->dev_private;
364
 
5078 serge 365
	/* Can't set profile when the card is off */
366
	if  ((rdev->flags & RADEON_IS_PX) &&
367
	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
368
		return -EINVAL;
369
 
1963 serge 370
	mutex_lock(&rdev->pm.mutex);
2997 Serge 371
	if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
372
		if (strncmp("default", buf, strlen("default")) == 0)
6104 serge 373
			rdev->pm.profile = PM_PROFILE_DEFAULT;
2997 Serge 374
		else if (strncmp("auto", buf, strlen("auto")) == 0)
375
			rdev->pm.profile = PM_PROFILE_AUTO;
376
		else if (strncmp("low", buf, strlen("low")) == 0)
377
			rdev->pm.profile = PM_PROFILE_LOW;
378
		else if (strncmp("mid", buf, strlen("mid")) == 0)
379
			rdev->pm.profile = PM_PROFILE_MID;
380
		else if (strncmp("high", buf, strlen("high")) == 0)
381
			rdev->pm.profile = PM_PROFILE_HIGH;
382
		else {
383
			count = -EINVAL;
384
			goto fail;
385
		}
6104 serge 386
		radeon_pm_update_profile(rdev);
387
		radeon_pm_set_clocks(rdev);
2997 Serge 388
	} else
389
		count = -EINVAL;
390
 
1963 serge 391
fail:
392
	mutex_unlock(&rdev->pm.mutex);
393
 
394
	return count;
395
}
396
 
397
static ssize_t radeon_get_pm_method(struct device *dev,
398
				    struct device_attribute *attr,
399
				    char *buf)
400
{
5078 serge 401
	struct drm_device *ddev = dev_get_drvdata(dev);
1963 serge 402
	struct radeon_device *rdev = ddev->dev_private;
403
	int pm = rdev->pm.pm_method;
404
 
405
	return snprintf(buf, PAGE_SIZE, "%s\n",
5078 serge 406
			(pm == PM_METHOD_DYNPM) ? "dynpm" :
407
			(pm == PM_METHOD_PROFILE) ? "profile" : "dpm");
1963 serge 408
}
409
 
410
/*
 * sysfs store callback: switch the legacy power-management method to
 * "dynpm" or "profile".  Switching is refused while DPM is active or
 * while a PX card is powered off.  Returns @count on success, -EINVAL
 * on bad input or invalid state.
 */
static ssize_t radeon_set_pm_method(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct radeon_device *rdev = ddev->dev_private;

	/* Can't set method when the card is off */
	if  ((rdev->flags & RADEON_IS_PX) &&
	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
		count = -EINVAL;
		goto fail;
	}

	/* we don't support the legacy modes with dpm */
	if (rdev->pm.pm_method == PM_METHOD_DPM) {
		count = -EINVAL;
		goto fail;
	}

	if (strncmp("dynpm", buf, strlen("dynpm")) == 0) {
		mutex_lock(&rdev->pm.mutex);
		rdev->pm.pm_method = PM_METHOD_DYNPM;
		/* start paused; dynpm becomes active via its state machine */
		rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
		rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
		mutex_unlock(&rdev->pm.mutex);
	} else if (strncmp("profile", buf, strlen("profile")) == 0) {
		mutex_lock(&rdev->pm.mutex);
		/* disable dynpm */
		rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
		rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
		rdev->pm.pm_method = PM_METHOD_PROFILE;
		mutex_unlock(&rdev->pm.mutex);
		/* NOTE(port): idle-work cancellation is disabled in this port */
//		cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
	} else {
		count = -EINVAL;
		goto fail;
	}
	radeon_pm_compute_clocks(rdev);
fail:
	return count;
}
453
 
5078 serge 454
static ssize_t radeon_get_dpm_state(struct device *dev,
455
				    struct device_attribute *attr,
456
				    char *buf)
457
{
458
	struct drm_device *ddev = dev_get_drvdata(dev);
459
	struct radeon_device *rdev = ddev->dev_private;
460
	enum radeon_pm_state_type pm = rdev->pm.dpm.user_state;
2997 Serge 461
 
5078 serge 462
	return snprintf(buf, PAGE_SIZE, "%s\n",
463
			(pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
464
			(pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
465
}
466
 
467
static ssize_t radeon_set_dpm_state(struct device *dev,
468
				    struct device_attribute *attr,
469
				    const char *buf,
470
				    size_t count)
471
{
472
	struct drm_device *ddev = dev_get_drvdata(dev);
473
	struct radeon_device *rdev = ddev->dev_private;
474
 
475
	mutex_lock(&rdev->pm.mutex);
476
	if (strncmp("battery", buf, strlen("battery")) == 0)
477
		rdev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY;
478
	else if (strncmp("balanced", buf, strlen("balanced")) == 0)
479
		rdev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
480
	else if (strncmp("performance", buf, strlen("performance")) == 0)
481
		rdev->pm.dpm.user_state = POWER_STATE_TYPE_PERFORMANCE;
482
	else {
483
		mutex_unlock(&rdev->pm.mutex);
484
		count = -EINVAL;
485
		goto fail;
486
	}
487
	mutex_unlock(&rdev->pm.mutex);
488
 
489
	/* Can't set dpm state when the card is off */
490
	if (!(rdev->flags & RADEON_IS_PX) ||
491
	    (ddev->switch_power_state == DRM_SWITCH_POWER_ON))
6104 serge 492
		radeon_pm_compute_clocks(rdev);
5078 serge 493
 
494
fail:
495
	return count;
496
}
497
 
498
static ssize_t radeon_get_dpm_forced_performance_level(struct device *dev,
499
						       struct device_attribute *attr,
500
						       char *buf)
501
{
502
	struct drm_device *ddev = dev_get_drvdata(dev);
503
	struct radeon_device *rdev = ddev->dev_private;
504
	enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
505
 
506
	if  ((rdev->flags & RADEON_IS_PX) &&
507
	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
508
		return snprintf(buf, PAGE_SIZE, "off\n");
509
 
510
	return snprintf(buf, PAGE_SIZE, "%s\n",
511
			(level == RADEON_DPM_FORCED_LEVEL_AUTO) ? "auto" :
512
			(level == RADEON_DPM_FORCED_LEVEL_LOW) ? "low" : "high");
513
}
514
 
515
static ssize_t radeon_set_dpm_forced_performance_level(struct device *dev,
516
						       struct device_attribute *attr,
517
						       const char *buf,
518
						       size_t count)
519
{
520
	struct drm_device *ddev = dev_get_drvdata(dev);
521
	struct radeon_device *rdev = ddev->dev_private;
522
	enum radeon_dpm_forced_level level;
523
	int ret = 0;
524
 
525
	/* Can't force performance level when the card is off */
526
	if  ((rdev->flags & RADEON_IS_PX) &&
527
	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
528
		return -EINVAL;
529
 
530
	mutex_lock(&rdev->pm.mutex);
531
	if (strncmp("low", buf, strlen("low")) == 0) {
532
		level = RADEON_DPM_FORCED_LEVEL_LOW;
533
	} else if (strncmp("high", buf, strlen("high")) == 0) {
534
		level = RADEON_DPM_FORCED_LEVEL_HIGH;
535
	} else if (strncmp("auto", buf, strlen("auto")) == 0) {
536
		level = RADEON_DPM_FORCED_LEVEL_AUTO;
537
	} else {
538
		count = -EINVAL;
539
		goto fail;
540
	}
541
	if (rdev->asic->dpm.force_performance_level) {
542
		if (rdev->pm.dpm.thermal_active) {
543
			count = -EINVAL;
544
			goto fail;
545
		}
546
		ret = radeon_dpm_force_performance_level(rdev, level);
547
		if (ret)
548
			count = -EINVAL;
549
	}
550
fail:
551
	mutex_unlock(&rdev->pm.mutex);
552
 
553
	return count;
554
}
555
 
556
 
1963 serge 557
static ssize_t radeon_hwmon_show_temp(struct device *dev,
558
				      struct device_attribute *attr,
559
				      char *buf)
560
{
5078 serge 561
	struct radeon_device *rdev = dev_get_drvdata(dev);
562
	struct drm_device *ddev = rdev->ddev;
2997 Serge 563
	int temp;
1963 serge 564
 
5078 serge 565
	/* Can't get temperature when the card is off */
566
	if  ((rdev->flags & RADEON_IS_PX) &&
567
	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
568
		return -EINVAL;
569
 
570
	if (rdev->asic->pm.get_temperature)
571
		temp = radeon_get_temperature(rdev);
572
	else
573
		temp = 0;
574
 
575
	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
576
}
577
 
578
/*
 * hwmon show callback for the thermal threshold.  The hysteresis
 * (min_temp) variant is compiled out in this port; only
 * dpm.thermal.max_temp is ever reported.
 */
static ssize_t radeon_hwmon_show_temp_thresh(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	struct radeon_device *rdev = dev_get_drvdata(dev);
//	int hyst = to_sensor_dev_attr(attr)->index;
	int temp;

//	if (hyst)
//		temp = rdev->pm.dpm.thermal.min_temp;
//	else
		temp = rdev->pm.dpm.thermal.max_temp;

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}
593
 
594
 
595
/* hwmon sysfs attribute list; the sensor entries are disabled in this
 * port, leaving only the NULL terminator. */
static struct attribute *hwmon_attributes[] = {
//	&sensor_dev_attr_temp1_input.dev_attr.attr,
//	&sensor_dev_attr_temp1_crit.dev_attr.attr,
//	&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
	NULL
};
601
 
602
 
603
 
604
/*
 * radeon_hwmon_init - internal thermal sensor setup.
 *
 * In this port the hwmon device registration is stripped out: for ASICs
 * with an internal thermal sensor we only bail out early when no
 * get_temperature callback is available.  Always returns 0.
 */
static int radeon_hwmon_init(struct radeon_device *rdev)
{
	int err = 0;

	switch (rdev->pm.int_thermal_type) {
	case THERMAL_TYPE_RV6XX:
	case THERMAL_TYPE_RV770:
	case THERMAL_TYPE_EVERGREEN:
	case THERMAL_TYPE_NI:
	case THERMAL_TYPE_SUMO:
	case THERMAL_TYPE_SI:
	case THERMAL_TYPE_CI:
	case THERMAL_TYPE_KV:
		/* nothing to register if the ASIC can't report a temperature */
		if (rdev->asic->pm.get_temperature == NULL)
			return err;

		break;
	default:
		break;
	}

	return err;
}
627
 
5078 serge 628
/* Tear down the hwmon device; a no-op in this port since registration
 * is disabled (the original unregister call is kept for reference). */
static void radeon_hwmon_fini(struct radeon_device *rdev)
{
//   if (rdev->pm.int_hwmon_dev)
//       hwmon_device_unregister(rdev->pm.int_hwmon_dev);
}
1430 serge 633
 
5078 serge 634
static void radeon_dpm_thermal_work_handler(struct work_struct *work)
1963 serge 635
{
5078 serge 636
	struct radeon_device *rdev =
637
		container_of(work, struct radeon_device,
638
			     pm.dpm.thermal.work);
639
	/* switch to the thermal state */
640
	enum radeon_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
1430 serge 641
 
5078 serge 642
	if (!rdev->pm.dpm_enabled)
643
		return;
1963 serge 644
 
5078 serge 645
	if (rdev->asic->pm.get_temperature) {
646
		int temp = radeon_get_temperature(rdev);
647
 
648
		if (temp < rdev->pm.dpm.thermal.min_temp)
649
			/* switch back the user state */
650
			dpm_state = rdev->pm.dpm.user_state;
651
	} else {
652
		if (rdev->pm.dpm.thermal.high_to_low)
653
			/* switch back the user state */
654
			dpm_state = rdev->pm.dpm.user_state;
655
	}
656
	mutex_lock(&rdev->pm.mutex);
657
	if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
658
		rdev->pm.dpm.thermal_active = true;
659
	else
660
		rdev->pm.dpm.thermal_active = false;
661
	rdev->pm.dpm.state = dpm_state;
662
	mutex_unlock(&rdev->pm.mutex);
663
 
664
	radeon_pm_compute_clocks(rdev);
1430 serge 665
}
666
 
6104 serge 667
static bool radeon_dpm_single_display(struct radeon_device *rdev)
1963 serge 668
{
5078 serge 669
	bool single_display = (rdev->pm.dpm.new_active_crtc_count < 2) ?
670
		true : false;
671
 
672
	/* check if the vblank period is too short to adjust the mclk */
673
	if (single_display && rdev->asic->dpm.vblank_too_short) {
674
		if (radeon_dpm_vblank_too_short(rdev))
675
			single_display = false;
676
	}
677
 
6104 serge 678
	/* 120hz tends to be problematic even if they are under the
679
	 * vblank limit.
680
	 */
681
	if (single_display && (r600_dpm_get_vrefresh(rdev) >= 120))
682
		single_display = false;
683
 
684
	return single_display;
685
}
686
 
687
/*
 * radeon_dpm_pick_power_state - choose the best dpm power state.
 * @rdev: radeon device
 * @dpm_state: requested power state type
 *
 * Scans the parsed power-state table for a state matching @dpm_state,
 * honouring ATOM single-display-only restrictions.  If nothing matches,
 * a fallback chain remaps the request (UVD-SD -> UVD-HD; thermal ->
 * ACPI -> battery; battery/balanced/3dperf -> performance) and restarts
 * the search.  Returns the chosen state or NULL if no state matched.
 */
static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
						     enum radeon_pm_state_type dpm_state)
{
	int i;
	struct radeon_ps *ps;
	u32 ui_class;
	bool single_display = radeon_dpm_single_display(rdev);

	/* certain older asics have a separare 3D performance state,
	 * so try that first if the user selected performance
	 */
	if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
		dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
	/* balanced states don't exist at the moment */
	if (dpm_state == POWER_STATE_TYPE_BALANCED)
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;

restart_search:
	/* Pick the best power state based on current conditions */
	for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
		ps = &rdev->pm.dpm.ps[i];
		ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
		switch (dpm_state) {
		/* user states */
		case POWER_STATE_TYPE_BATTERY:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					/* only usable with at most one display */
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_BALANCED:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_PERFORMANCE:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		/* internal states */
		case POWER_STATE_TYPE_INTERNAL_UVD:
			/* use the cached generic UVD state if one was parsed */
			if (rdev->pm.dpm.uvd_ps)
				return rdev->pm.dpm.uvd_ps;
			else
				break;
		case POWER_STATE_TYPE_INTERNAL_UVD_SD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_BOOT:
			return rdev->pm.dpm.boot_ps;
		case POWER_STATE_TYPE_INTERNAL_THERMAL:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ACPI:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ULV:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_3DPERF:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
				return ps;
			break;
		default:
			break;
		}
	}
	/* use a fallback state if we didn't match */
	switch (dpm_state) {
	case POWER_STATE_TYPE_INTERNAL_UVD_SD:
		dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_UVD_HD:
	case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
	case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
		if (rdev->pm.dpm.uvd_ps) {
			return rdev->pm.dpm.uvd_ps;
		} else {
			dpm_state = POWER_STATE_TYPE_PERFORMANCE;
			goto restart_search;
		}
	case POWER_STATE_TYPE_INTERNAL_THERMAL:
		dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_ACPI:
		dpm_state = POWER_STATE_TYPE_BATTERY;
		goto restart_search;
	case POWER_STATE_TYPE_BATTERY:
	case POWER_STATE_TYPE_BALANCED:
	case POWER_STATE_TYPE_INTERNAL_3DPERF:
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;
		goto restart_search;
	default:
		break;
	}

	return NULL;
}
813
 
5078 serge 814
/*
 * radeon_dpm_change_power_state_locked - transition to the current dpm
 * state.  Caller must hold rdev->pm.mutex.
 *
 * Resolves any user-state override, picks a matching power state, and
 * skips the full hardware reprogram when only the display configuration
 * changed (pre-BTC/IGP, or BTC+ with multiple heads on both sides).
 * Otherwise drains the rings and programs the new state while holding
 * mclk_lock (write) and ring_lock, then re-applies the forced
 * performance level (forcing LOW while thermally throttled).
 */
static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
{
	int i;
	struct radeon_ps *ps;
	enum radeon_pm_state_type dpm_state;
	int ret;
	bool single_display = radeon_dpm_single_display(rdev);

	/* if dpm init failed */
	if (!rdev->pm.dpm_enabled)
		return;

	if (rdev->pm.dpm.user_state != rdev->pm.dpm.state) {
		/* add other state override checks here */
		if ((!rdev->pm.dpm.thermal_active) &&
		    (!rdev->pm.dpm.uvd_active))
			rdev->pm.dpm.state = rdev->pm.dpm.user_state;
	}
	dpm_state = rdev->pm.dpm.state;

	ps = radeon_dpm_pick_power_state(rdev, dpm_state);
	if (ps)
		rdev->pm.dpm.requested_ps = ps;
	else
		return;

	/* no need to reprogram if nothing changed unless we are on BTC+ */
	if (rdev->pm.dpm.current_ps == rdev->pm.dpm.requested_ps) {
		/* vce just modifies an existing state so force a change */
		if (ps->vce_active != rdev->pm.dpm.vce_active)
			goto force;
		/* user has made a display change (such as timing) */
		if (rdev->pm.dpm.single_display != single_display)
			goto force;
		if ((rdev->family < CHIP_BARTS) || (rdev->flags & RADEON_IS_IGP)) {
			/* for pre-BTC and APUs if the num crtcs changed but state is the same,
			 * all we need to do is update the display configuration.
			 */
			if (rdev->pm.dpm.new_active_crtcs != rdev->pm.dpm.current_active_crtcs) {
				/* update display watermarks based on new power state */
				radeon_bandwidth_update(rdev);
				/* update displays */
				radeon_dpm_display_configuration_changed(rdev);
				rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
				rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
			}
			return;
		} else {
			/* for BTC+ if the num crtcs hasn't changed and state is the same,
			 * nothing to do, if the num crtcs is > 1 and state is the same,
			 * update display configuration.
			 */
			if (rdev->pm.dpm.new_active_crtcs ==
			    rdev->pm.dpm.current_active_crtcs) {
				return;
			} else {
				if ((rdev->pm.dpm.current_active_crtc_count > 1) &&
				    (rdev->pm.dpm.new_active_crtc_count > 1)) {
					/* update display watermarks based on new power state */
					radeon_bandwidth_update(rdev);
					/* update displays */
					radeon_dpm_display_configuration_changed(rdev);
					rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
					rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
					return;
				}
			}
		}
	}

force:
	if (radeon_dpm == 1) {
		printk("switching from power state:\n");
		radeon_dpm_print_power_state(rdev, rdev->pm.dpm.current_ps);
		printk("switching to power state:\n");
		radeon_dpm_print_power_state(rdev, rdev->pm.dpm.requested_ps);
	}

	down_write(&rdev->pm.mclk_lock);
	mutex_lock(&rdev->ring_lock);

	/* update whether vce is active */
	ps->vce_active = rdev->pm.dpm.vce_active;

	ret = radeon_dpm_pre_set_power_state(rdev);
	if (ret)
		goto done;

	/* update display watermarks based on new power state */
	radeon_bandwidth_update(rdev);
	/* update displays */
	radeon_dpm_display_configuration_changed(rdev);

	/* wait for the rings to drain */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		struct radeon_ring *ring = &rdev->ring[i];
		if (ring->ready)
			radeon_fence_wait_empty(rdev, i);
	}

	/* program the new power state */
	radeon_dpm_set_power_state(rdev);

	/* update current power state */
	rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps;

	radeon_dpm_post_set_power_state(rdev);

	rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
	rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
	rdev->pm.dpm.single_display = single_display;

	if (rdev->asic->dpm.force_performance_level) {
		if (rdev->pm.dpm.thermal_active) {
			enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
			/* force low perf level for thermal */
			radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_LOW);
			/* save the user's level */
			rdev->pm.dpm.forced_level = level;
		} else {
			/* otherwise, user selected level */
			radeon_dpm_force_performance_level(rdev, rdev->pm.dpm.forced_level);
		}
	}

done:
	mutex_unlock(&rdev->ring_lock);
	up_write(&rdev->pm.mclk_lock);
}
943
 
944
/**
 * radeon_dpm_enable_uvd - notify dpm that UVD (video decode) activity changed
 * @rdev: radeon_device pointer
 * @enable: true if a UVD stream is becoming active
 *
 * If the asic supports UVD powergating, gate/ungate the UVD block directly.
 * Otherwise switch the dpm state machine into/out of the UVD power state and
 * recompute clocks.  All dpm state accesses are protected by pm.mutex.
 */
void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable)
{
	enum radeon_pm_state_type dpm_state;

	if (rdev->asic->dpm.powergate_uvd) {
		mutex_lock(&rdev->pm.mutex);
		/* don't powergate anything if we
		   have active but pause streams */
		enable |= rdev->pm.dpm.sd > 0;
		enable |= rdev->pm.dpm.hd > 0;
		/* enable/disable UVD */
		radeon_dpm_powergate_uvd(rdev, !enable);
		mutex_unlock(&rdev->pm.mutex);
	} else {
		if (enable) {
			mutex_lock(&rdev->pm.mutex);
			rdev->pm.dpm.uvd_active = true;
			/* disable this for now: the per-stream-count state
			 * selection below is intentionally compiled out, so
			 * every active stream uses the generic UVD state */
#if 0
			if ((rdev->pm.dpm.sd == 1) && (rdev->pm.dpm.hd == 0))
				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_SD;
			else if ((rdev->pm.dpm.sd == 2) && (rdev->pm.dpm.hd == 0))
				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
			else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 1))
				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
			else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 2))
				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD2;
			else
#endif
				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD;
			rdev->pm.dpm.state = dpm_state;
			mutex_unlock(&rdev->pm.mutex);
		} else {
			mutex_lock(&rdev->pm.mutex);
			rdev->pm.dpm.uvd_active = false;
			mutex_unlock(&rdev->pm.mutex);
		}

		radeon_pm_compute_clocks(rdev);
	}
}
985
 
986
/**
 * radeon_dpm_enable_vce - notify dpm that VCE (video encode) activity changed
 * @rdev: radeon_device pointer
 * @enable: true if a VCE stream is becoming active
 *
 * Record VCE activity under pm.mutex and recompute clocks.  The VCE level is
 * only (re)selected when enabling; it is left untouched on disable.
 */
void radeon_dpm_enable_vce(struct radeon_device *rdev, bool enable)
{
	mutex_lock(&rdev->pm.mutex);
	rdev->pm.dpm.vce_active = enable;
	if (enable) {
		/* XXX select vce level based on ring/task */
		rdev->pm.dpm.vce_level = RADEON_VCE_LEVEL_AC_ALL;
	}
	mutex_unlock(&rdev->pm.mutex);

	radeon_pm_compute_clocks(rdev);
}
1002
 
1003
static void radeon_pm_suspend_old(struct radeon_device *rdev)
1004
{
1963 serge 1005
	mutex_lock(&rdev->pm.mutex);
1006
	if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
1007
		if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE)
1008
			rdev->pm.dynpm_state = DYNPM_STATE_SUSPENDED;
1009
	}
1010
	mutex_unlock(&rdev->pm.mutex);
1011
 
1012
}
1013
 
5078 serge 1014
/*
 * dpm suspend hook: tear down dpm and fall back to the boot power state so
 * asic re-init on resume starts from a known configuration.
 */
static void radeon_pm_suspend_dpm(struct radeon_device *rdev)
{
	mutex_lock(&rdev->pm.mutex);
	/* disable dpm */
	radeon_dpm_disable(rdev);
	/* reset the power state */
	rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
	rdev->pm.dpm_enabled = false;
	mutex_unlock(&rdev->pm.mutex);
}
1024
 
1025
void radeon_pm_suspend(struct radeon_device *rdev)
1026
{
1027
	if (rdev->pm.pm_method == PM_METHOD_DPM)
1028
		radeon_pm_suspend_dpm(rdev);
1029
	else
1030
		radeon_pm_suspend_old(rdev);
1031
}
1032
 
1033
/*
 * Legacy-pm resume hook: restore default voltages/clocks (asic init reset
 * them), re-seed the cached current-state bookkeeping, re-arm dynpm if it was
 * suspended, and recompute clocks for the current display configuration.
 */
static void radeon_pm_resume_old(struct radeon_device *rdev)
{
	/* set up the default clocks if the MC ucode is loaded */
	if ((rdev->family >= CHIP_BARTS) &&
	    (rdev->family <= CHIP_CAYMAN) &&
	    rdev->mc_fw) {
		if (rdev->pm.default_vddc)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
						SET_VOLTAGE_TYPE_ASIC_VDDC);
		if (rdev->pm.default_vddci)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
						SET_VOLTAGE_TYPE_ASIC_VDDCI);
		if (rdev->pm.default_sclk)
			radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
		if (rdev->pm.default_mclk)
			radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
	}
	/* asic init will reset the default power state */
	mutex_lock(&rdev->pm.mutex);
	rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
	rdev->pm.current_clock_mode_index = 0;
	rdev->pm.current_sclk = rdev->pm.default_sclk;
	rdev->pm.current_mclk = rdev->pm.default_mclk;
	/* power_state may be NULL if the table parse failed at init time */
	if (rdev->pm.power_state) {
		rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
		rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci;
	}
	if (rdev->pm.pm_method == PM_METHOD_DYNPM
	    && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) {
		rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
		/* idle-work rescheduling is disabled in this port */
//		schedule_delayed_work(&rdev->pm.dynpm_idle_work,
//					msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
	}
	mutex_unlock(&rdev->pm.mutex);
	radeon_pm_compute_clocks(rdev);
}
1069
 
5078 serge 1070
/*
 * dpm resume hook: re-enable dpm from the boot state.  On failure, fall back
 * to programming the default voltages/clocks directly (same fallback as the
 * legacy path) so the card is at least in a sane state.
 */
static void radeon_pm_resume_dpm(struct radeon_device *rdev)
{
	int ret;

	/* asic init will reset to the boot state */
	mutex_lock(&rdev->pm.mutex);
	rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
	radeon_dpm_setup_asic(rdev);
	ret = radeon_dpm_enable(rdev);
	mutex_unlock(&rdev->pm.mutex);
	if (ret)
		goto dpm_resume_fail;
	rdev->pm.dpm_enabled = true;
	return;

dpm_resume_fail:
	DRM_ERROR("radeon: dpm resume failed\n");
	/* set up the default clocks if the MC ucode is loaded */
	if ((rdev->family >= CHIP_BARTS) &&
	    (rdev->family <= CHIP_CAYMAN) &&
	    rdev->mc_fw) {
		if (rdev->pm.default_vddc)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
						SET_VOLTAGE_TYPE_ASIC_VDDC);
		if (rdev->pm.default_vddci)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
						SET_VOLTAGE_TYPE_ASIC_VDDCI);
		if (rdev->pm.default_sclk)
			radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
		if (rdev->pm.default_mclk)
			radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
	}
}
1102
 
1103
void radeon_pm_resume(struct radeon_device *rdev)
1104
{
1105
	if (rdev->pm.pm_method == PM_METHOD_DPM)
1106
		radeon_pm_resume_dpm(rdev);
1107
	else
1108
		radeon_pm_resume_old(rdev);
1109
}
1110
 
1111
/*
 * Initialize the legacy (profile/dynpm) power management path: seed default
 * state, parse the power tables from the BIOS, program default clocks and
 * voltages, and set up the thermal sensor.
 *
 * Returns 0 on success or a negative error code from hwmon init.
 */
static int radeon_pm_init_old(struct radeon_device *rdev)
{
	int ret;

	rdev->pm.profile = PM_PROFILE_DEFAULT;
	rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
	rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
	rdev->pm.dynpm_can_upclock = true;
	rdev->pm.dynpm_can_downclock = true;
	rdev->pm.default_sclk = rdev->clock.default_sclk;
	rdev->pm.default_mclk = rdev->clock.default_mclk;
	rdev->pm.current_sclk = rdev->clock.default_sclk;
	rdev->pm.current_mclk = rdev->clock.default_mclk;
	rdev->pm.int_thermal_type = THERMAL_TYPE_NONE;

	if (rdev->bios) {
		/* parse the power state tables from the video BIOS */
		if (rdev->is_atom_bios)
			radeon_atombios_get_power_modes(rdev);
		else
			radeon_combios_get_power_modes(rdev);
		radeon_pm_print_states(rdev);
		radeon_pm_init_profile(rdev);
		/* set up the default clocks if the MC ucode is loaded */
		if ((rdev->family >= CHIP_BARTS) &&
		    (rdev->family <= CHIP_CAYMAN) &&
		    rdev->mc_fw) {
			if (rdev->pm.default_vddc)
				radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
							SET_VOLTAGE_TYPE_ASIC_VDDC);
			if (rdev->pm.default_vddci)
				radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
							SET_VOLTAGE_TYPE_ASIC_VDDCI);
			if (rdev->pm.default_sclk)
				radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
			if (rdev->pm.default_mclk)
				radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
		}
	}

	/* set up the internal thermal sensor if applicable */
	ret = radeon_hwmon_init(rdev);
	if (ret)
		return ret;

	/* dynpm idle work is disabled in this port */
//	INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler);

	if (rdev->pm.num_power_states > 1) {
		/* where's the best place to put these? */

		DRM_INFO("radeon: power management initialized\n");
	}

	return 0;
}
1166
 
5078 serge 1167
static void radeon_dpm_print_power_states(struct radeon_device *rdev)
1963 serge 1168
{
5078 serge 1169
	int i;
1170
 
1171
	for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
1172
		printk("== power state %d ==\n", i);
1173
		radeon_dpm_print_power_state(rdev, &rdev->pm.dpm.ps[i]);
1174
	}
1175
}
1176
 
1177
/*
 * Initialize the dpm power management path: seed defaults, parse the atom
 * power tables (dpm requires an atom BIOS), set up the thermal sensor and
 * thermal worker, then bring up the dpm hardware.  On failure, restore the
 * default voltages/clocks and report the error.
 *
 * Returns 0 on success or a negative error code.
 */
static int radeon_pm_init_dpm(struct radeon_device *rdev)
{
	int ret;

	/* default to balanced state */
	rdev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
	rdev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
	rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
	rdev->pm.default_sclk = rdev->clock.default_sclk;
	rdev->pm.default_mclk = rdev->clock.default_mclk;
	rdev->pm.current_sclk = rdev->clock.default_sclk;
	rdev->pm.current_mclk = rdev->clock.default_mclk;
	rdev->pm.int_thermal_type = THERMAL_TYPE_NONE;

	/* dpm needs the power tables from an atom BIOS */
	if (rdev->bios && rdev->is_atom_bios)
		radeon_atombios_get_power_modes(rdev);
	else
		return -EINVAL;

	/* set up the internal thermal sensor if applicable */
	ret = radeon_hwmon_init(rdev);
	if (ret)
		return ret;

	INIT_WORK(&rdev->pm.dpm.thermal.work, radeon_dpm_thermal_work_handler);
	mutex_lock(&rdev->pm.mutex);
	radeon_dpm_init(rdev);
	/* start from the boot power state */
	rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
	if (radeon_dpm == 1)
		radeon_dpm_print_power_states(rdev);
	radeon_dpm_setup_asic(rdev);
	ret = radeon_dpm_enable(rdev);
	mutex_unlock(&rdev->pm.mutex);
	if (ret)
		goto dpm_failed;
	rdev->pm.dpm_enabled = true;

	DRM_INFO("radeon: dpm initialized\n");

	return 0;

dpm_failed:
	rdev->pm.dpm_enabled = false;
	/* fall back to the default voltages/clocks if the MC ucode is loaded */
	if ((rdev->family >= CHIP_BARTS) &&
	    (rdev->family <= CHIP_CAYMAN) &&
	    rdev->mc_fw) {
		if (rdev->pm.default_vddc)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
						SET_VOLTAGE_TYPE_ASIC_VDDC);
		if (rdev->pm.default_vddci)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
						SET_VOLTAGE_TYPE_ASIC_VDDCI);
		if (rdev->pm.default_sclk)
			radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
		if (rdev->pm.default_mclk)
			radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
	}
	DRM_ERROR("radeon: dpm initialization failed\n");
	return ret;
}
1237
 
6104 serge 1238
/* PCI ids (chip + subsystem) identifying one board with broken dpm */
struct radeon_dpm_quirk {
	u32 chip_vendor;
	u32 chip_device;
	u32 subsys_vendor;
	u32 subsys_device;
};
1244
 
1245
/* cards with dpm stability problems */
1246
static struct radeon_dpm_quirk radeon_dpm_quirk_list[] = {
1247
	/* TURKS - https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1386534 */
1248
	{ PCI_VENDOR_ID_ATI, 0x6759, 0x1682, 0x3195 },
1249
	/* TURKS - https://bugzilla.kernel.org/show_bug.cgi?id=83731 */
1250
	{ PCI_VENDOR_ID_ATI, 0x6840, 0x1179, 0xfb81 },
1251
	{ 0, 0, 0, 0 },
1252
};
1253
 
5078 serge 1254
/*
 * Top-level pm init: apply board quirks, pick dpm or the legacy profile
 * method based on chip family, available firmware (RLC, and SMC on RV770+
 * dGPUs) and the radeon_dpm module parameter, then run the chosen init.
 *
 * Returns 0 on success or a negative error code from the chosen init path.
 */
int radeon_pm_init(struct radeon_device *rdev)
{
	struct radeon_dpm_quirk *p = radeon_dpm_quirk_list;
	bool disable_dpm = false;

	/* Apply dpm quirks */
	while (p && p->chip_device != 0) {
		if (rdev->pdev->vendor == p->chip_vendor &&
		    rdev->pdev->device == p->chip_device &&
		    rdev->pdev->subsystem_vendor == p->subsys_vendor &&
		    rdev->pdev->subsystem_device == p->subsys_device) {
			disable_dpm = true;
			break;
		}
		++p;
	}

	/* enable dpm on rv6xx+ */
	switch (rdev->family) {
	case CHIP_RV610:
	case CHIP_RV630:
	case CHIP_RV620:
	case CHIP_RV635:
	case CHIP_RV670:
	case CHIP_RS780:
	case CHIP_RS880:
	case CHIP_RV770:
		/* DPM requires the RLC, RV770+ dGPU requires SMC */
		if (!rdev->rlc_fw)
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		else if ((rdev->family >= CHIP_RV770) &&
			 (!(rdev->flags & RADEON_IS_IGP)) &&
			 (!rdev->smc_fw))
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		else if (radeon_dpm == 1)
			/* dpm only when explicitly requested on these parts */
			rdev->pm.pm_method = PM_METHOD_DPM;
		else
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		break;
	case CHIP_RV730:
	case CHIP_RV710:
	case CHIP_RV740:
	case CHIP_CEDAR:
	case CHIP_REDWOOD:
	case CHIP_JUNIPER:
	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
	case CHIP_PALM:
	case CHIP_SUMO:
	case CHIP_SUMO2:
	case CHIP_BARTS:
	case CHIP_TURKS:
	case CHIP_CAICOS:
	case CHIP_CAYMAN:
	case CHIP_ARUBA:
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
	case CHIP_HAINAN:
	case CHIP_BONAIRE:
	case CHIP_KABINI:
	case CHIP_KAVERI:
	case CHIP_HAWAII:
	case CHIP_MULLINS:
		/* DPM requires the RLC, RV770+ dGPU requires SMC */
		if (!rdev->rlc_fw)
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		else if ((rdev->family >= CHIP_RV770) &&
			 (!(rdev->flags & RADEON_IS_IGP)) &&
			 (!rdev->smc_fw))
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		else if (disable_dpm && (radeon_dpm == -1))
			/* quirked board, and the user did not force dpm */
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		else if (radeon_dpm == 0)
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		else
			/* dpm is the default on these parts */
			rdev->pm.pm_method = PM_METHOD_DPM;
		break;
	default:
		/* default to profile method */
		rdev->pm.pm_method = PM_METHOD_PROFILE;
		break;
	}

	if (rdev->pm.pm_method == PM_METHOD_DPM)
		return radeon_pm_init_dpm(rdev);
	else
		return radeon_pm_init_old(rdev);
}
1344
 
1345
int radeon_pm_late_init(struct radeon_device *rdev)
1346
{
1347
	int ret = 0;
1348
 
1349
	if (rdev->pm.pm_method == PM_METHOD_DPM) {
1350
		mutex_lock(&rdev->pm.mutex);
1351
		ret = radeon_dpm_late_enable(rdev);
1352
		mutex_unlock(&rdev->pm.mutex);
1353
	}
1354
	return ret;
1355
}
1356
 
1357
/*
 * Legacy-pm teardown: restore default clocks for whichever method was in
 * use, then release the thermal sensor and the power state table.
 */
static void radeon_pm_fini_old(struct radeon_device *rdev)
{
	if (rdev->pm.num_power_states > 1) {
		mutex_lock(&rdev->pm.mutex);
		if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
			rdev->pm.profile = PM_PROFILE_DEFAULT;
			radeon_pm_update_profile(rdev);
			radeon_pm_set_clocks(rdev);
		} else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
			/* reset default clocks */
			rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
			rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
			radeon_pm_set_clocks(rdev);
		}
		mutex_unlock(&rdev->pm.mutex);

		/* idle work is never scheduled in this port */
//		cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
	}

	radeon_hwmon_fini(rdev);
	kfree(rdev->pm.power_state);
}
1380
 
5078 serge 1381
/*
 * dpm teardown: disable dpm (if it was brought up), free the dpm state,
 * then release the thermal sensor and the power state table.
 */
static void radeon_pm_fini_dpm(struct radeon_device *rdev)
{
	if (rdev->pm.num_power_states > 1) {
		mutex_lock(&rdev->pm.mutex);
		radeon_dpm_disable(rdev);
		mutex_unlock(&rdev->pm.mutex);
	}
	radeon_dpm_fini(rdev);

	radeon_hwmon_fini(rdev);
	kfree(rdev->pm.power_state);
}
1393
 
1394
void radeon_pm_fini(struct radeon_device *rdev)
1395
{
1396
	if (rdev->pm.pm_method == PM_METHOD_DPM)
1397
		radeon_pm_fini_dpm(rdev);
1398
	else
1399
		radeon_pm_fini_old(rdev);
1400
}
1401
 
1402
/*
 * Legacy-pm clock recomputation: rebuild the active-crtc mask/count, then
 * either re-apply the current profile, or drive the dynpm state machine:
 * >1 head pauses dynpm at default clocks, exactly 1 head (re)activates it,
 * and 0 heads drops to minimum clocks.  All under pm.mutex.
 */
static void radeon_pm_compute_clocks_old(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;

	/* nothing to switch between with fewer than two states */
	if (rdev->pm.num_power_states < 2)
		return;

	mutex_lock(&rdev->pm.mutex);

	rdev->pm.active_crtcs = 0;
	rdev->pm.active_crtc_count = 0;
	if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc,
				    &ddev->mode_config.crtc_list, head) {
			radeon_crtc = to_radeon_crtc(crtc);
			if (radeon_crtc->enabled) {
				rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
				rdev->pm.active_crtc_count++;
			}
		}
	}

	if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
		radeon_pm_update_profile(rdev);
		radeon_pm_set_clocks(rdev);
	} else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
		if (rdev->pm.dynpm_state != DYNPM_STATE_DISABLED) {
			if (rdev->pm.active_crtc_count > 1) {
				/* multi-head: pause dynpm at default clocks */
				if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
//					cancel_delayed_work(&rdev->pm.dynpm_idle_work);

					rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
					radeon_pm_get_dynpm_state(rdev);
					radeon_pm_set_clocks(rdev);

					DRM_DEBUG_DRIVER("radeon: dynamic power management deactivated\n");
				}
			} else if (rdev->pm.active_crtc_count == 1) {
				/* TODO: Increase clocks if needed for current mode */

				if (rdev->pm.dynpm_state == DYNPM_STATE_MINIMUM) {
					rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_UPCLOCK;
					radeon_pm_get_dynpm_state(rdev);
					radeon_pm_set_clocks(rdev);

					/* idle work is disabled in this port */
//					schedule_delayed_work(&rdev->pm.dynpm_idle_work,
//							   msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
				} else if (rdev->pm.dynpm_state == DYNPM_STATE_PAUSED) {
					rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
//					schedule_delayed_work(&rdev->pm.dynpm_idle_work,
//							   msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
					DRM_DEBUG_DRIVER("radeon: dynamic power management activated\n");
				}
			} else { /* count == 0 */
				/* no heads lit: drop to minimum clocks */
				if (rdev->pm.dynpm_state != DYNPM_STATE_MINIMUM) {
//					cancel_delayed_work(&rdev->pm.dynpm_idle_work);

					rdev->pm.dynpm_state = DYNPM_STATE_MINIMUM;
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_MINIMUM;
					radeon_pm_get_dynpm_state(rdev);
					radeon_pm_set_clocks(rdev);
				}
			}
		}
	}

	mutex_unlock(&rdev->pm.mutex);
}
1474
 
5078 serge 1475
/*
 * dpm clock recomputation: refresh the pending active-crtc mask/count and
 * the AC/battery status, then ask the dpm core to pick and program a power
 * state.  All under pm.mutex.
 */
static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;

	if (!rdev->pm.dpm_enabled)
		return;

	mutex_lock(&rdev->pm.mutex);

	/* update active crtc counts */
	rdev->pm.dpm.new_active_crtcs = 0;
	rdev->pm.dpm.new_active_crtc_count = 0;
	if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc,
				    &ddev->mode_config.crtc_list, head) {
			radeon_crtc = to_radeon_crtc(crtc);
			if (crtc->enabled) {
				rdev->pm.dpm.new_active_crtcs |= (1 << radeon_crtc->crtc_id);
				rdev->pm.dpm.new_active_crtc_count++;
			}
		}
	}

	/* update battery/ac status */
	if (power_supply_is_system_supplied() > 0)
		rdev->pm.dpm.ac_power = true;
	else
		rdev->pm.dpm.ac_power = false;

	radeon_dpm_change_power_state_locked(rdev);

	mutex_unlock(&rdev->pm.mutex);
}
1511
 
1512
void radeon_pm_compute_clocks(struct radeon_device *rdev)
1513
{
1514
	if (rdev->pm.pm_method == PM_METHOD_DPM)
1515
		radeon_pm_compute_clocks_dpm(rdev);
1516
	else
1517
		radeon_pm_compute_clocks_old(rdev);
1518
}
1519
 
1963 serge 1520
/*
 * Return true only if every active crtc is currently inside vblank (a crtc
 * whose scanout position cannot be read is treated as in-vblank).  Used to
 * time reclocking so it does not tear the display.
 */
static bool radeon_pm_in_vbl(struct radeon_device *rdev)
{
	int  crtc, vpos, hpos, vbl_status;
	bool in_vbl = true;

	/* Iterate over all active crtc's. All crtc's must be in vblank,
	 * otherwise return in_vbl == false.
	 */
	for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) {
		if (rdev->pm.active_crtcs & (1 << crtc)) {
			vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev,
								crtc,
								USE_REAL_VBLANKSTART,
								&vpos, &hpos, NULL, NULL,
								&rdev->mode_info.crtcs[crtc]->base.hwmode);
			/* only trust the result when the query is valid */
			if ((vbl_status & DRM_SCANOUTPOS_VALID) &&
			    !(vbl_status & DRM_SCANOUTPOS_IN_VBLANK))
				in_vbl = false;
		}
	}

	return in_vbl;
}
1543
 
1963 serge 1544
/*
 * Like radeon_pm_in_vbl(), but emits a debug message when a pm change is
 * attempted outside vblank; @finish distinguishes entry vs. exit checks.
 */
static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish)
{
	u32 stat_crtc = 0;
	bool in_vbl = radeon_pm_in_vbl(rdev);

	if (!in_vbl)
		DRM_DEBUG_DRIVER("not in vbl for pm change %08x at %s\n",
				 stat_crtc, finish ? "exit" : "entry");
	return in_vbl;
}
1554
 
1555
 
1268 serge 1556
/*
1557
 * Debugfs info
1558
 */
1559
#if defined(CONFIG_DEBUG_FS)
1560
 
1561
/*
 * debugfs dump of the current power state: dpm performance level when dpm is
 * enabled, otherwise the cached/queried engine and memory clocks, voltage
 * and PCIE lane count.  Always returns 0.
 */
static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct drm_device *ddev = rdev->ddev;

	if  ((rdev->flags & RADEON_IS_PX) &&
	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
		/* PX (switchable graphics) asic is powered down; don't touch it */
		seq_printf(m, "PX asic powered off\n");
	} else if (rdev->pm.dpm_enabled) {
		mutex_lock(&rdev->pm.mutex);
		if (rdev->asic->dpm.debugfs_print_current_performance_level)
			radeon_dpm_debugfs_print_current_performance_level(rdev, m);
		else
			seq_printf(m, "Debugfs support not implemented for this asic\n");
		mutex_unlock(&rdev->pm.mutex);
	} else {
		/* NOTE(review): the literal '0' in "%u0" appends a trailing zero —
		 * presumably clocks are stored in 10 kHz units; confirm upstream */
		seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk);
		/* radeon_get_engine_clock is not reliable on APUs so just print the current clock */
		if ((rdev->family >= CHIP_PALM) && (rdev->flags & RADEON_IS_IGP))
			seq_printf(m, "current engine clock: %u0 kHz\n", rdev->pm.current_sclk);
		else
			seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
		seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk);
		if (rdev->asic->pm.get_memory_clock)
			seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
		if (rdev->pm.current_vddc)
			seq_printf(m, "voltage: %u mV\n", rdev->pm.current_vddc);
		if (rdev->asic->pm.get_pcie_lanes)
			seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev));
	}

	return 0;
}
1596
 
1597
/* debugfs entries registered for this device by radeon_debugfs_pm_init() */
static struct drm_info_list radeon_pm_info_list[] = {
	{"radeon_pm_info", radeon_debugfs_pm_info, 0, NULL},
};
1600
#endif
1601
 
1430 serge 1602
/*
 * Register the pm debugfs files; compiles to a no-op returning 0 when
 * debugfs support is not built in.
 */
static int radeon_debugfs_pm_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_pm_info_list, ARRAY_SIZE(radeon_pm_info_list));
#else
	return 0;
#endif
}