Subversion Repositories Kolibri OS

Rev

Rev 2997 | Rev 5078 | Go to most recent revision | Details | Compare with Previous | Last modification | View Log | RSS feed

Rev Author Line No. Line
1268 serge 1
/*
2
 * Permission is hereby granted, free of charge, to any person obtaining a
3
 * copy of this software and associated documentation files (the "Software"),
4
 * to deal in the Software without restriction, including without limitation
5
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
6
 * and/or sell copies of the Software, and to permit persons to whom the
7
 * Software is furnished to do so, subject to the following conditions:
8
 *
9
 * The above copyright notice and this permission notice shall be included in
10
 * all copies or substantial portions of the Software.
11
 *
12
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
13
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
15
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
16
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
17
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
18
 * OTHER DEALINGS IN THE SOFTWARE.
19
 *
20
 * Authors: Rafał Miłecki 
1430 serge 21
 *          Alex Deucher 
1268 serge 22
 */
2997 Serge 23
#include 
1268 serge 24
#include "radeon.h"
1430 serge 25
#include "avivod.h"
1986 serge 26
#include "atom.h"
1268 serge 27
 
1963 serge 28
 
1430 serge 29
#define RADEON_IDLE_LOOP_MS 100
30
#define RADEON_RECLOCK_DELAY_MS 200
31
#define RADEON_WAIT_VBLANK_TIMEOUT 200
1268 serge 32
 
1963 serge 33
/* Human-readable names for power-state types, indexed by
 * power_state->type (see radeon_pm_print_states()). Index 0 is the
 * default/unnamed type and intentionally prints as an empty string. */
static const char *radeon_pm_state_type_name[5] = {
	"",
	"Powersave",
	"Battery",
	"Balanced",
	"Performance",
};
40
 
1963 serge 41
static void radeon_dynpm_idle_work_handler(struct work_struct *work);
42
static int radeon_debugfs_pm_init(struct radeon_device *rdev);
43
static bool radeon_pm_in_vbl(struct radeon_device *rdev);
44
static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish);
45
static void radeon_pm_update_profile(struct radeon_device *rdev);
46
static void radeon_pm_set_clocks(struct radeon_device *rdev);
47
 
2997 Serge 48
int radeon_pm_get_type_index(struct radeon_device *rdev,
49
			     enum radeon_pm_state_type ps_type,
50
			     int instance)
51
{
52
	int i;
53
	int found_instance = -1;
1963 serge 54
 
2997 Serge 55
	for (i = 0; i < rdev->pm.num_power_states; i++) {
56
		if (rdev->pm.power_state[i].type == ps_type) {
57
			found_instance++;
58
			if (found_instance == instance)
59
				return i;
60
		}
61
	}
62
	/* return default if no match */
63
	return rdev->pm.default_power_state_index;
64
}
1963 serge 65
 
2997 Serge 66
void radeon_pm_acpi_event_handler(struct radeon_device *rdev)
1430 serge 67
{
1963 serge 68
		if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
69
			if (rdev->pm.profile == PM_PROFILE_AUTO) {
70
				mutex_lock(&rdev->pm.mutex);
71
				radeon_pm_update_profile(rdev);
72
				radeon_pm_set_clocks(rdev);
73
				mutex_unlock(&rdev->pm.mutex);
1430 serge 74
		}
75
	}
76
}
77
 
1963 serge 78
/*
 * radeon_pm_update_profile - translate the user-selected profile into a
 * concrete profile table index, then into requested power-state and
 * clock-mode indices.
 *
 * AUTO picks HIGH on mains power and MID on battery (via
 * power_supply_is_system_supplied()); all profiles choose the multi-head
 * (_MH_) variant when more than one CRTC is active, single-head (_SH_)
 * otherwise. With no active CRTCs the dpms-off entries of the profile
 * are requested instead of the dpms-on ones.
 * Caller must hold rdev->pm.mutex.
 */
static void radeon_pm_update_profile(struct radeon_device *rdev)
{
	switch (rdev->pm.profile) {
	case PM_PROFILE_DEFAULT:
		rdev->pm.profile_index = PM_PROFILE_DEFAULT_IDX;
		break;
	case PM_PROFILE_AUTO:
		/* >0 means running from mains/AC */
		if (power_supply_is_system_supplied() > 0) {
			if (rdev->pm.active_crtc_count > 1)
				rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
			else
				rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
		} else {
			if (rdev->pm.active_crtc_count > 1)
				rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
			else
				rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
		}
		break;
	case PM_PROFILE_LOW:
		if (rdev->pm.active_crtc_count > 1)
			rdev->pm.profile_index = PM_PROFILE_LOW_MH_IDX;
		else
			rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX;
		break;
	case PM_PROFILE_MID:
		if (rdev->pm.active_crtc_count > 1)
			rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
		else
			rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
		break;
	case PM_PROFILE_HIGH:
		if (rdev->pm.active_crtc_count > 1)
			rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
		else
			rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
		break;
	}

	/* map the chosen profile entry onto requested state/clock indices */
	if (rdev->pm.active_crtc_count == 0) {
		rdev->pm.requested_power_state_index =
			rdev->pm.profiles[rdev->pm.profile_index].dpms_off_ps_idx;
		rdev->pm.requested_clock_mode_index =
			rdev->pm.profiles[rdev->pm.profile_index].dpms_off_cm_idx;
	} else {
		rdev->pm.requested_power_state_index =
			rdev->pm.profiles[rdev->pm.profile_index].dpms_on_ps_idx;
		rdev->pm.requested_clock_mode_index =
			rdev->pm.profiles[rdev->pm.profile_index].dpms_on_cm_idx;
	}
}
129
 
130
static void radeon_unmap_vram_bos(struct radeon_device *rdev)
131
{
132
	struct radeon_bo *bo, *n;
133
 
134
	if (list_empty(&rdev->gem.objects))
135
		return;
136
 
137
}
138
 
2997 Serge 139
/*
 * radeon_sync_with_vblank - (intended) wait for the next vblank before a
 * clock change.
 *
 * The actual wait_event_timeout() on the vblank queue is disabled in
 * this port, so currently this only clears the vblank_sync flag when any
 * CRTC is active; it does not block.
 */
static void radeon_sync_with_vblank(struct radeon_device *rdev)
{
	if (rdev->pm.active_crtcs) {
		rdev->pm.vblank_sync = false;
		/* disabled in this port: would block up to
		 * RADEON_WAIT_VBLANK_TIMEOUT ms for the vblank IRQ */
//       wait_event_timeout(
//           rdev->irq.vblank_queue, rdev->pm.vblank_sync,
//           msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT));
    }
}
1963 serge 148
 
149
/*
 * radeon_set_power_state - program the requested engine/memory clocks.
 *
 * No-op when the requested state/clock-mode indices already match the
 * current ones. Clocks are only changed when the GUI engine is idle;
 * voltage ("misc") is applied before raising clocks and after lowering
 * them. Under DYNPM the change is additionally abandoned unless all
 * active CRTCs are in vblank. Caller must hold rdev->pm.mutex.
 */
static void radeon_set_power_state(struct radeon_device *rdev)
{
	u32 sclk, mclk;
	bool misc_after = false;

	if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
	    (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
		return;

	if (radeon_gui_idle(rdev)) {
		/* clamp engine clock to the default maximum */
		sclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
			clock_info[rdev->pm.requested_clock_mode_index].sclk;
		if (sclk > rdev->pm.default_sclk)
			sclk = rdev->pm.default_sclk;

		/* starting with BTC, there is one state that is used for both
		 * MH and SH.  Difference is that we always use the high clock index for
		 * mclk and vddci.
		 */
		if ((rdev->pm.pm_method == PM_METHOD_PROFILE) &&
		    (rdev->family >= CHIP_BARTS) &&
		    rdev->pm.active_crtc_count &&
		    ((rdev->pm.profile_index == PM_PROFILE_MID_MH_IDX) ||
		     (rdev->pm.profile_index == PM_PROFILE_LOW_MH_IDX)))
			mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
				clock_info[rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx].mclk;
		else
		mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
			clock_info[rdev->pm.requested_clock_mode_index].mclk;

		/* clamp memory clock to the default maximum */
		if (mclk > rdev->pm.default_mclk)
			mclk = rdev->pm.default_mclk;

		/* upvolt before raising clocks, downvolt after lowering clocks */
		if (sclk < rdev->pm.current_sclk)
			misc_after = true;

		radeon_sync_with_vblank(rdev);

		/* dynpm changes must land inside vblank or not at all */
		if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
			if (!radeon_pm_in_vbl(rdev))
				return;
		}

		radeon_pm_prepare(rdev);

		if (!misc_after)
			/* voltage, pcie lanes, etc.*/
			radeon_pm_misc(rdev);

	/* set engine clock */
		if (sclk != rdev->pm.current_sclk) {
	radeon_pm_debug_check_in_vbl(rdev, false);
			radeon_set_engine_clock(rdev, sclk);
	radeon_pm_debug_check_in_vbl(rdev, true);
			rdev->pm.current_sclk = sclk;
			DRM_DEBUG_DRIVER("Setting: e: %d\n", sclk);
		}

	/* set memory clock */
		if (rdev->asic->pm.set_memory_clock && (mclk != rdev->pm.current_mclk)) {
		radeon_pm_debug_check_in_vbl(rdev, false);
			radeon_set_memory_clock(rdev, mclk);
		radeon_pm_debug_check_in_vbl(rdev, true);
			rdev->pm.current_mclk = mclk;
			DRM_DEBUG_DRIVER("Setting: m: %d\n", mclk);
		}

		if (misc_after)
			/* voltage, pcie lanes, etc.*/
			radeon_pm_misc(rdev);

		radeon_pm_finish(rdev);

		/* commit: requested state is now current */
		rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index;
		rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index;
	} else
		DRM_DEBUG_DRIVER("pm: GUI not idle!!!\n");
}
228
 
229
/*
 * radeon_pm_set_clocks - quiesce the GPU and apply the requested power
 * state.
 *
 * Takes struct_mutex and ring_lock (mclk_lock and the vblank get/put
 * calls are disabled in this port), drains every ready ring, then calls
 * radeon_set_power_state() and refreshes display watermarks. Bails out
 * early — with locks released — if a ring fails to drain, since that
 * needs a GPU reset. Caller must hold rdev->pm.mutex.
 */
static void radeon_pm_set_clocks(struct radeon_device *rdev)
{
	int i, r;

	/* no need to take locks, etc. if nothing's going to change */
	if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
	    (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
		return;

	mutex_lock(&rdev->ddev->struct_mutex);
//   down_write(&rdev->pm.mclk_lock);
	mutex_lock(&rdev->ring_lock);

	/* wait for the rings to drain */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		struct radeon_ring *ring = &rdev->ring[i];
		if (!ring->ready) {
			continue;
		}
		r = radeon_fence_wait_empty_locked(rdev, i);
		if (r) {
			/* needs a GPU reset dont reset here */
			mutex_unlock(&rdev->ring_lock);
//			up_write(&rdev->pm.mclk_lock);
			mutex_unlock(&rdev->ddev->struct_mutex);
			return;
		}
	}

	radeon_unmap_vram_bos(rdev);

	/* mark the vblank refs we would take (drm_vblank_get disabled) */
	if (rdev->irq.installed) {
		for (i = 0; i < rdev->num_crtc; i++) {
			if (rdev->pm.active_crtcs & (1 << i)) {
				rdev->pm.req_vblank |= (1 << i);
//               drm_vblank_get(rdev->ddev, i);
			}
		}
	}

	radeon_set_power_state(rdev);

	/* release the vblank refs (drm_vblank_put disabled) */
	if (rdev->irq.installed) {
		for (i = 0; i < rdev->num_crtc; i++) {
			if (rdev->pm.req_vblank & (1 << i)) {
				rdev->pm.req_vblank &= ~(1 << i);
//               drm_vblank_put(rdev->ddev, i);
			}
		}
	}

	/* update display watermarks based on new power state */
	radeon_update_bandwidth_info(rdev);
	if (rdev->pm.active_crtc_count)
		radeon_bandwidth_update(rdev);

	rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;

	mutex_unlock(&rdev->ring_lock);
//   up_write(&rdev->pm.mclk_lock);
	mutex_unlock(&rdev->ddev->struct_mutex);
}
291
 
1963 serge 292
static void radeon_pm_print_states(struct radeon_device *rdev)
1430 serge 293
{
1963 serge 294
	int i, j;
295
	struct radeon_power_state *power_state;
296
	struct radeon_pm_clock_info *clock_info;
297
 
298
	DRM_DEBUG_DRIVER("%d Power State(s)\n", rdev->pm.num_power_states);
299
	for (i = 0; i < rdev->pm.num_power_states; i++) {
300
		power_state = &rdev->pm.power_state[i];
301
		DRM_DEBUG_DRIVER("State %d: %s\n", i,
302
			radeon_pm_state_type_name[power_state->type]);
303
		if (i == rdev->pm.default_power_state_index)
304
			DRM_DEBUG_DRIVER("\tDefault");
305
		if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP))
306
			DRM_DEBUG_DRIVER("\t%d PCIE Lanes\n", power_state->pcie_lanes);
307
		if (power_state->flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
308
			DRM_DEBUG_DRIVER("\tSingle display only\n");
309
		DRM_DEBUG_DRIVER("\t%d Clock Mode(s)\n", power_state->num_clock_modes);
310
		for (j = 0; j < power_state->num_clock_modes; j++) {
311
			clock_info = &(power_state->clock_info[j]);
312
			if (rdev->flags & RADEON_IS_IGP)
2997 Serge 313
				DRM_DEBUG_DRIVER("\t\t%d e: %d\n",
1963 serge 314
					j,
2997 Serge 315
						 clock_info->sclk * 10);
1963 serge 316
			else
2997 Serge 317
				DRM_DEBUG_DRIVER("\t\t%d e: %d\tm: %d\tv: %d\n",
1963 serge 318
					j,
319
					clock_info->sclk * 10,
320
					clock_info->mclk * 10,
2997 Serge 321
						 clock_info->voltage.voltage);
1963 serge 322
		}
1430 serge 323
	}
1963 serge 324
}
1430 serge 325
 
1963 serge 326
static ssize_t radeon_get_pm_profile(struct device *dev,
327
				     struct device_attribute *attr,
328
				     char *buf)
329
{
2997 Serge 330
	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
331
	struct radeon_device *rdev = ddev->dev_private;
332
	int cp = rdev->pm.profile;
1963 serge 333
 
2997 Serge 334
	return snprintf(buf, PAGE_SIZE, "%s\n",
335
			(cp == PM_PROFILE_AUTO) ? "auto" :
336
			(cp == PM_PROFILE_LOW) ? "low" :
337
			(cp == PM_PROFILE_MID) ? "mid" :
338
			(cp == PM_PROFILE_HIGH) ? "high" : "default");
1430 serge 339
}
340
 
1963 serge 341
static ssize_t radeon_set_pm_profile(struct device *dev,
342
				     struct device_attribute *attr,
343
				     const char *buf,
344
				     size_t count)
1430 serge 345
{
1963 serge 346
	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
347
	struct radeon_device *rdev = ddev->dev_private;
348
 
349
	mutex_lock(&rdev->pm.mutex);
2997 Serge 350
	if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
351
		if (strncmp("default", buf, strlen("default")) == 0)
1963 serge 352
    rdev->pm.profile = PM_PROFILE_DEFAULT;
2997 Serge 353
		else if (strncmp("auto", buf, strlen("auto")) == 0)
354
			rdev->pm.profile = PM_PROFILE_AUTO;
355
		else if (strncmp("low", buf, strlen("low")) == 0)
356
			rdev->pm.profile = PM_PROFILE_LOW;
357
		else if (strncmp("mid", buf, strlen("mid")) == 0)
358
			rdev->pm.profile = PM_PROFILE_MID;
359
		else if (strncmp("high", buf, strlen("high")) == 0)
360
			rdev->pm.profile = PM_PROFILE_HIGH;
361
		else {
362
			count = -EINVAL;
363
			goto fail;
364
		}
1963 serge 365
    radeon_pm_update_profile(rdev);
366
    radeon_pm_set_clocks(rdev);
2997 Serge 367
	} else
368
		count = -EINVAL;
369
 
1963 serge 370
fail:
371
	mutex_unlock(&rdev->pm.mutex);
372
 
373
	return count;
374
}
375
 
376
static ssize_t radeon_get_pm_method(struct device *dev,
377
				    struct device_attribute *attr,
378
				    char *buf)
379
{
380
	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
381
	struct radeon_device *rdev = ddev->dev_private;
382
	int pm = rdev->pm.pm_method;
383
 
384
	return snprintf(buf, PAGE_SIZE, "%s\n",
385
			(pm == PM_METHOD_DYNPM) ? "dynpm" : "profile");
386
}
387
 
388
static ssize_t radeon_set_pm_method(struct device *dev,
389
				    struct device_attribute *attr,
390
				    const char *buf,
391
				    size_t count)
392
{
393
	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
394
	struct radeon_device *rdev = ddev->dev_private;
395
 
396
 
397
	if (strncmp("dynpm", buf, strlen("dynpm")) == 0) {
398
		mutex_lock(&rdev->pm.mutex);
399
		rdev->pm.pm_method = PM_METHOD_DYNPM;
400
		rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
401
		rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
402
		mutex_unlock(&rdev->pm.mutex);
403
	} else if (strncmp("profile", buf, strlen("profile")) == 0) {
404
		mutex_lock(&rdev->pm.mutex);
405
		/* disable dynpm */
406
		rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
407
		rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
408
		rdev->pm.pm_method = PM_METHOD_PROFILE;
409
		mutex_unlock(&rdev->pm.mutex);
410
//		cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
411
	} else {
2997 Serge 412
		count = -EINVAL;
1963 serge 413
		goto fail;
414
	}
415
	radeon_pm_compute_clocks(rdev);
416
fail:
417
	return count;
418
}
419
 
2997 Serge 420
//static DEVICE_ATTR(power_profile, S_IRUGO | S_IWUSR, radeon_get_pm_profile, radeon_set_pm_profile);
421
//static DEVICE_ATTR(power_method, S_IRUGO | S_IWUSR, radeon_get_pm_method, radeon_set_pm_method);
422
 
1963 serge 423
static ssize_t radeon_hwmon_show_temp(struct device *dev,
424
				      struct device_attribute *attr,
425
				      char *buf)
426
{
427
	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
428
	struct radeon_device *rdev = ddev->dev_private;
2997 Serge 429
	int temp;
1963 serge 430
 
431
	switch (rdev->pm.int_thermal_type) {
432
	case THERMAL_TYPE_RV6XX:
433
		temp = rv6xx_get_temp(rdev);
1430 serge 434
		break;
1963 serge 435
	case THERMAL_TYPE_RV770:
436
		temp = rv770_get_temp(rdev);
1430 serge 437
		break;
1963 serge 438
	case THERMAL_TYPE_EVERGREEN:
439
	case THERMAL_TYPE_NI:
440
		temp = evergreen_get_temp(rdev);
1430 serge 441
		break;
2997 Serge 442
	case THERMAL_TYPE_SUMO:
443
		temp = sumo_get_temp(rdev);
444
		break;
445
	case THERMAL_TYPE_SI:
446
		temp = si_get_temp(rdev);
447
		break;
1430 serge 448
	default:
1963 serge 449
		temp = 0;
450
		break;
1430 serge 451
	}
1963 serge 452
 
453
	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
1430 serge 454
}
455
 
1963 serge 456
/* hwmon "name" attribute: identifies this sensor chip as "radeon". */
static ssize_t radeon_hwmon_show_name(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	return sprintf(buf, "radeon\n");
}
1430 serge 462
 
1963 serge 463
static int radeon_hwmon_init(struct radeon_device *rdev)
464
{
465
	int err = 0;
1430 serge 466
 
1963 serge 467
	rdev->pm.int_hwmon_dev = NULL;
468
 
469
	return err;
1430 serge 470
}
471
 
1963 serge 472
/* Teardown counterpart of radeon_hwmon_init(); intentionally empty since
 * no hwmon device is registered in this port. */
static void radeon_hwmon_fini(struct radeon_device *rdev)
{
}
475
 
476
/*
 * radeon_pm_suspend - park power management for system suspend.
 *
 * Under dynamic PM, moves an ACTIVE state machine to SUSPENDED so no
 * reclocking happens while the device is down. The idle-work cancel is
 * disabled in this port.
 */
void radeon_pm_suspend(struct radeon_device *rdev)
{
	mutex_lock(&rdev->pm.mutex);
	if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
		if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE)
			rdev->pm.dynpm_state = DYNPM_STATE_SUSPENDED;
	}
	mutex_unlock(&rdev->pm.mutex);

//	cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
}
487
 
488
/*
 * radeon_pm_resume - restore PM bookkeeping after resume.
 *
 * ASIC init has reset the hardware to the default power state, so the
 * cached current state/clock/voltage values are reset to the defaults,
 * a SUSPENDED dynpm state machine is reactivated (its idle work is
 * disabled in this port), and clocks are recomputed for the current
 * display configuration.
 */
void radeon_pm_resume(struct radeon_device *rdev)
{
	/* asic init will reset the default power state */
	mutex_lock(&rdev->pm.mutex);
	rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
	rdev->pm.current_clock_mode_index = 0;
	rdev->pm.current_sclk = rdev->pm.default_sclk;
	rdev->pm.current_mclk = rdev->pm.default_mclk;
	rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
	rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci;
	if (rdev->pm.pm_method == PM_METHOD_DYNPM
	    && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) {
		rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
//		schedule_delayed_work(&rdev->pm.dynpm_idle_work,
//					msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
	}
	mutex_unlock(&rdev->pm.mutex);
	radeon_pm_compute_clocks(rdev);
}
507
 
1268 serge 508
/*
 * radeon_pm_init - one-time power-management setup.
 *
 * Defaults to profile-based PM with the DEFAULT profile and dynpm
 * disabled, seeds the default/current clocks from the parsed clock info,
 * reads the power-state tables from the ATOM or combios BIOS when one is
 * present, and sets up the (stubbed) internal thermal sensor.
 *
 * Returns 0 on success or the error from radeon_hwmon_init().
 */
int radeon_pm_init(struct radeon_device *rdev)
{
	int ret;

	/* default to profile method */
	rdev->pm.pm_method = PM_METHOD_PROFILE;
	rdev->pm.profile = PM_PROFILE_DEFAULT;
	rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
	rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
	rdev->pm.dynpm_can_upclock = true;
	rdev->pm.dynpm_can_downclock = true;
	rdev->pm.default_sclk = rdev->clock.default_sclk;
	rdev->pm.default_mclk = rdev->clock.default_mclk;
	rdev->pm.current_sclk = rdev->clock.default_sclk;
	rdev->pm.current_mclk = rdev->clock.default_mclk;
	rdev->pm.int_thermal_type = THERMAL_TYPE_NONE;

	/* power-state tables come from the video BIOS */
	if (rdev->bios) {
		if (rdev->is_atom_bios)
			radeon_atombios_get_power_modes(rdev);
		else
			radeon_combios_get_power_modes(rdev);
		radeon_pm_print_states(rdev);
		radeon_pm_init_profile(rdev);
	}

	/* set up the internal thermal sensor if applicable */
	ret = radeon_hwmon_init(rdev);
	if (ret)
		return ret;

	if (rdev->pm.num_power_states > 1) {

		DRM_INFO("radeon: power management initialized\n");
	}

	return 0;
}
546
 
1963 serge 547
/*
 * radeon_pm_fini - power-management teardown.
 *
 * When reclocking is possible (>1 power state), restores the default
 * clocks under the PM mutex — via the DEFAULT profile for profile PM, or
 * a DEFAULT dynpm action for dynamic PM — then tears down the (stubbed)
 * hwmon sensor. The idle-work cancel is disabled in this port.
 */
void radeon_pm_fini(struct radeon_device *rdev)
{
	if (rdev->pm.num_power_states > 1) {
		mutex_lock(&rdev->pm.mutex);
		if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
			rdev->pm.profile = PM_PROFILE_DEFAULT;
			radeon_pm_update_profile(rdev);
			radeon_pm_set_clocks(rdev);
		} else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
			/* reset default clocks */
			rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
			rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
			radeon_pm_set_clocks(rdev);
		}
		mutex_unlock(&rdev->pm.mutex);

//		cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);

    }

	radeon_hwmon_fini(rdev);
}
569
 
1430 serge 570
/*
 * radeon_pm_compute_clocks - recompute clocks after a display change.
 *
 * Recounts the active CRTCs, then either re-applies the current profile
 * (profile PM) or steps the dynpm state machine: >1 head pauses dynpm at
 * default clocks, exactly 1 head (re)activates it, and 0 heads drops to
 * minimum clocks. All delayed-work scheduling/cancelling is disabled in
 * this port. No-op when fewer than two power states exist.
 */
void radeon_pm_compute_clocks(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;

	if (rdev->pm.num_power_states < 2)
		return;

	mutex_lock(&rdev->pm.mutex);

	/* rebuild the active-CRTC bitmask and count */
	rdev->pm.active_crtcs = 0;
	rdev->pm.active_crtc_count = 0;
	list_for_each_entry(crtc,
		&ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
			rdev->pm.active_crtc_count++;
		}
	}

	if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
		radeon_pm_update_profile(rdev);
		radeon_pm_set_clocks(rdev);
	} else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
		if (rdev->pm.dynpm_state != DYNPM_STATE_DISABLED) {
			if (rdev->pm.active_crtc_count > 1) {
				/* multi-head: pause dynpm at default clocks */
				if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
//                   cancel_delayed_work(&rdev->pm.dynpm_idle_work);

					rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
					radeon_pm_get_dynpm_state(rdev);
				radeon_pm_set_clocks(rdev);

					DRM_DEBUG_DRIVER("radeon: dynamic power management deactivated\n");
		}
			} else if (rdev->pm.active_crtc_count == 1) {
		/* TODO: Increase clocks if needed for current mode */

				/* single head: leave MINIMUM via an upclock,
				 * or simply resume from PAUSED */
				if (rdev->pm.dynpm_state == DYNPM_STATE_MINIMUM) {
					rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_UPCLOCK;
					radeon_pm_get_dynpm_state(rdev);
			radeon_pm_set_clocks(rdev);

//					schedule_delayed_work(&rdev->pm.dynpm_idle_work,
//							   msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
				} else if (rdev->pm.dynpm_state == DYNPM_STATE_PAUSED) {
					rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
//					schedule_delayed_work(&rdev->pm.dynpm_idle_work,
//							   msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
					DRM_DEBUG_DRIVER("radeon: dynamic power management activated\n");
        }
			} else { /* count == 0 */
				/* no heads: drop to minimum clocks */
				if (rdev->pm.dynpm_state != DYNPM_STATE_MINIMUM) {
//					cancel_delayed_work(&rdev->pm.dynpm_idle_work);

					rdev->pm.dynpm_state = DYNPM_STATE_MINIMUM;
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_MINIMUM;
					radeon_pm_get_dynpm_state(rdev);
					radeon_pm_set_clocks(rdev);
		}
	}
		}
	}

	mutex_unlock(&rdev->pm.mutex);
}
640
 
1963 serge 641
static bool radeon_pm_in_vbl(struct radeon_device *rdev)
1430 serge 642
{
1963 serge 643
	int  crtc, vpos, hpos, vbl_status;
1430 serge 644
	bool in_vbl = true;
645
 
1963 serge 646
	/* Iterate over all active crtc's. All crtc's must be in vblank,
647
	 * otherwise return in_vbl == false.
648
	 */
649
	for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) {
650
		if (rdev->pm.active_crtcs & (1 << crtc)) {
651
			vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, &vpos, &hpos);
652
			if ((vbl_status & DRM_SCANOUTPOS_VALID) &&
653
			    !(vbl_status & DRM_SCANOUTPOS_INVBL))
1430 serge 654
				in_vbl = false;
655
		}
656
		}
1963 serge 657
 
1430 serge 658
	return in_vbl;
659
}
660
 
1963 serge 661
/*
 * radeon_pm_debug_check_in_vbl - log when a clock change happens outside
 * vblank.
 *
 * @finish selects the log wording ("exit" vs "entry" of the change).
 * Note stat_crtc is never assigned here, so the %08x field always logs
 * as 0 in this version. Returns the radeon_pm_in_vbl() result.
 */
static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish)
{
	u32 stat_crtc = 0;
	bool in_vbl = radeon_pm_in_vbl(rdev);

	if (in_vbl == false)
		DRM_DEBUG_DRIVER("not in vbl for pm change %08x at %s\n", stat_crtc,
			 finish ? "exit" : "entry");
	return in_vbl;
}
671
 
672
 
1268 serge 673
/*
674
 * Debugfs info
675
 */
676
#if defined(CONFIG_DEBUG_FS)
677
 
678
/*
 * radeon_debugfs_pm_info - debugfs dump of current PM status: engine and
 * memory clocks (stored in 10 kHz units, hence the "%u0 kHz" format),
 * core voltage, and PCIE lane count where the ASIC exposes them.
 */
static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk);
	/* radeon_get_engine_clock is not reliable on APUs so just print the current clock */
	if ((rdev->family >= CHIP_PALM) && (rdev->flags & RADEON_IS_IGP))
		seq_printf(m, "current engine clock: %u0 kHz\n", rdev->pm.current_sclk);
	else
	seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
	seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk);
	if (rdev->asic->pm.get_memory_clock)
		seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
	if (rdev->pm.current_vddc)
		seq_printf(m, "voltage: %u mV\n", rdev->pm.current_vddc);
	if (rdev->asic->pm.get_pcie_lanes)
		seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev));

	return 0;
}
700
 
701
/* debugfs entries registered by radeon_debugfs_pm_init() */
static struct drm_info_list radeon_pm_info_list[] = {
	{"radeon_pm_info", radeon_debugfs_pm_info, 0, NULL},
};
704
#endif
705
 
1430 serge 706
/* Register the PM debugfs files; compiles to a successful no-op when
 * CONFIG_DEBUG_FS is not enabled. */
static int radeon_debugfs_pm_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_pm_info_list, ARRAY_SIZE(radeon_pm_info_list));
#else
	return 0;
#endif
}