Subversion Repositories Kolibri OS

Rev

Rev 1986 | Rev 3764 | Go to most recent revision | Details | Compare with Previous | Last modification | View Log | RSS feed

Rev Author Line No. Line
1268 serge 1
/*
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Rafał Miłecki <zajec5@gmail.com>
 *          Alex Deucher <alexdeucher@gmail.com>
 */
2997 Serge 23
#include <drm/drmP.h>
#include "radeon.h"
#include "avivod.h"
#include "atom.h"
1268 serge 27
 
1963 serge 28
 
1430 serge 29
#define RADEON_IDLE_LOOP_MS 100
30
#define RADEON_RECLOCK_DELAY_MS 200
31
#define RADEON_WAIT_VBLANK_TIMEOUT 200
1268 serge 32
 
1963 serge 33
static const char *radeon_pm_state_type_name[5] = {
2997 Serge 34
	"",
1430 serge 35
	"Powersave",
36
	"Battery",
37
	"Balanced",
38
	"Performance",
39
};
40
 
1963 serge 41
static void radeon_dynpm_idle_work_handler(struct work_struct *work);
42
static int radeon_debugfs_pm_init(struct radeon_device *rdev);
43
static bool radeon_pm_in_vbl(struct radeon_device *rdev);
44
static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish);
45
static void radeon_pm_update_profile(struct radeon_device *rdev);
46
static void radeon_pm_set_clocks(struct radeon_device *rdev);
47
 
2997 Serge 48
int radeon_pm_get_type_index(struct radeon_device *rdev,
49
			     enum radeon_pm_state_type ps_type,
50
			     int instance)
51
{
52
	int i;
53
	int found_instance = -1;
1963 serge 54
 
2997 Serge 55
	for (i = 0; i < rdev->pm.num_power_states; i++) {
56
		if (rdev->pm.power_state[i].type == ps_type) {
57
			found_instance++;
58
			if (found_instance == instance)
59
				return i;
60
		}
61
	}
62
	/* return default if no match */
63
	return rdev->pm.default_power_state_index;
64
}
1963 serge 65
 
2997 Serge 66
void radeon_pm_acpi_event_handler(struct radeon_device *rdev)
1430 serge 67
{
1963 serge 68
		if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
69
			if (rdev->pm.profile == PM_PROFILE_AUTO) {
70
				mutex_lock(&rdev->pm.mutex);
71
				radeon_pm_update_profile(rdev);
72
				radeon_pm_set_clocks(rdev);
73
				mutex_unlock(&rdev->pm.mutex);
1430 serge 74
		}
75
	}
76
}
77
 
1963 serge 78
static void radeon_pm_update_profile(struct radeon_device *rdev)
1430 serge 79
{
1963 serge 80
	switch (rdev->pm.profile) {
81
	case PM_PROFILE_DEFAULT:
82
		rdev->pm.profile_index = PM_PROFILE_DEFAULT_IDX;
1430 serge 83
		break;
1963 serge 84
	case PM_PROFILE_AUTO:
85
		if (power_supply_is_system_supplied() > 0) {
86
			if (rdev->pm.active_crtc_count > 1)
87
				rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
88
			else
89
				rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
1430 serge 90
		} else {
1963 serge 91
			if (rdev->pm.active_crtc_count > 1)
92
				rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
93
			else
94
				rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
1430 serge 95
		}
96
		break;
1963 serge 97
	case PM_PROFILE_LOW:
98
		if (rdev->pm.active_crtc_count > 1)
99
			rdev->pm.profile_index = PM_PROFILE_LOW_MH_IDX;
100
		else
101
			rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX;
1430 serge 102
		break;
1963 serge 103
	case PM_PROFILE_MID:
104
		if (rdev->pm.active_crtc_count > 1)
105
			rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
106
		else
107
			rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
108
		break;
109
	case PM_PROFILE_HIGH:
110
		if (rdev->pm.active_crtc_count > 1)
111
			rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
112
		else
113
			rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
114
		break;
1430 serge 115
	}
116
 
1963 serge 117
	if (rdev->pm.active_crtc_count == 0) {
118
		rdev->pm.requested_power_state_index =
119
			rdev->pm.profiles[rdev->pm.profile_index].dpms_off_ps_idx;
120
		rdev->pm.requested_clock_mode_index =
121
			rdev->pm.profiles[rdev->pm.profile_index].dpms_off_cm_idx;
122
	} else {
123
		rdev->pm.requested_power_state_index =
124
			rdev->pm.profiles[rdev->pm.profile_index].dpms_on_ps_idx;
125
		rdev->pm.requested_clock_mode_index =
126
			rdev->pm.profiles[rdev->pm.profile_index].dpms_on_cm_idx;
1430 serge 127
		}
1963 serge 128
}
129
 
130
static void radeon_unmap_vram_bos(struct radeon_device *rdev)
131
{
132
	struct radeon_bo *bo, *n;
133
 
134
	if (list_empty(&rdev->gem.objects))
135
		return;
136
 
137
}
138
 
2997 Serge 139
static void radeon_sync_with_vblank(struct radeon_device *rdev)
140
{
141
	if (rdev->pm.active_crtcs) {
142
		rdev->pm.vblank_sync = false;
143
//       wait_event_timeout(
144
//           rdev->irq.vblank_queue, rdev->pm.vblank_sync,
145
//           msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT));
146
    }
147
}
1963 serge 148
 
149
static void radeon_set_power_state(struct radeon_device *rdev)
150
{
151
	u32 sclk, mclk;
152
	bool misc_after = false;
153
 
154
	if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
155
	    (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
156
		return;
157
 
158
	if (radeon_gui_idle(rdev)) {
159
		sclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
160
			clock_info[rdev->pm.requested_clock_mode_index].sclk;
161
		if (sclk > rdev->pm.default_sclk)
162
			sclk = rdev->pm.default_sclk;
163
 
2997 Serge 164
		/* starting with BTC, there is one state that is used for both
165
		 * MH and SH.  Difference is that we always use the high clock index for
166
		 * mclk.
167
		 */
168
		if ((rdev->pm.pm_method == PM_METHOD_PROFILE) &&
169
		    (rdev->family >= CHIP_BARTS) &&
170
		    rdev->pm.active_crtc_count &&
171
		    ((rdev->pm.profile_index == PM_PROFILE_MID_MH_IDX) ||
172
		     (rdev->pm.profile_index == PM_PROFILE_LOW_MH_IDX)))
173
			mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
174
				clock_info[rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx].mclk;
175
		else
1963 serge 176
		mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
177
			clock_info[rdev->pm.requested_clock_mode_index].mclk;
2997 Serge 178
 
1963 serge 179
		if (mclk > rdev->pm.default_mclk)
180
			mclk = rdev->pm.default_mclk;
181
 
182
		/* upvolt before raising clocks, downvolt after lowering clocks */
183
		if (sclk < rdev->pm.current_sclk)
184
			misc_after = true;
185
 
2997 Serge 186
		radeon_sync_with_vblank(rdev);
1963 serge 187
 
188
		if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
189
			if (!radeon_pm_in_vbl(rdev))
190
				return;
191
		}
192
 
193
		radeon_pm_prepare(rdev);
194
 
195
		if (!misc_after)
196
			/* voltage, pcie lanes, etc.*/
197
			radeon_pm_misc(rdev);
198
 
199
	/* set engine clock */
200
		if (sclk != rdev->pm.current_sclk) {
201
	radeon_pm_debug_check_in_vbl(rdev, false);
202
			radeon_set_engine_clock(rdev, sclk);
203
	radeon_pm_debug_check_in_vbl(rdev, true);
204
			rdev->pm.current_sclk = sclk;
205
			DRM_DEBUG_DRIVER("Setting: e: %d\n", sclk);
206
		}
207
 
208
	/* set memory clock */
2997 Serge 209
		if (rdev->asic->pm.set_memory_clock && (mclk != rdev->pm.current_mclk)) {
1963 serge 210
		radeon_pm_debug_check_in_vbl(rdev, false);
211
			radeon_set_memory_clock(rdev, mclk);
212
		radeon_pm_debug_check_in_vbl(rdev, true);
213
			rdev->pm.current_mclk = mclk;
214
			DRM_DEBUG_DRIVER("Setting: m: %d\n", mclk);
215
		}
216
 
217
		if (misc_after)
218
			/* voltage, pcie lanes, etc.*/
219
			radeon_pm_misc(rdev);
220
 
221
		radeon_pm_finish(rdev);
222
 
223
		rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index;
224
		rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index;
225
	} else
226
		DRM_DEBUG_DRIVER("pm: GUI not idle!!!\n");
227
}
228
 
229
static void radeon_pm_set_clocks(struct radeon_device *rdev)
230
{
231
	int i;
232
 
233
	/* no need to take locks, etc. if nothing's going to change */
234
	if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
235
	    (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
236
		return;
237
 
238
	mutex_lock(&rdev->ddev->struct_mutex);
2997 Serge 239
//   down_write(&rdev->pm.mclk_lock);
240
	mutex_lock(&rdev->ring_lock);
1963 serge 241
 
2997 Serge 242
	/* wait for the rings to drain */
243
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
244
		struct radeon_ring *ring = &rdev->ring[i];
245
		if (ring->ready)
246
			radeon_fence_wait_empty_locked(rdev, i);
1430 serge 247
	}
2997 Serge 248
 
1963 serge 249
	radeon_unmap_vram_bos(rdev);
1430 serge 250
 
1963 serge 251
	if (rdev->irq.installed) {
252
		for (i = 0; i < rdev->num_crtc; i++) {
253
			if (rdev->pm.active_crtcs & (1 << i)) {
254
				rdev->pm.req_vblank |= (1 << i);
255
//               drm_vblank_get(rdev->ddev, i);
256
			}
257
		}
258
	}
259
 
260
	radeon_set_power_state(rdev);
261
 
262
	if (rdev->irq.installed) {
263
		for (i = 0; i < rdev->num_crtc; i++) {
264
			if (rdev->pm.req_vblank & (1 << i)) {
265
				rdev->pm.req_vblank &= ~(1 << i);
266
//               drm_vblank_put(rdev->ddev, i);
267
			}
268
		}
269
	}
270
 
271
	/* update display watermarks based on new power state */
272
	radeon_update_bandwidth_info(rdev);
273
	if (rdev->pm.active_crtc_count)
274
		radeon_bandwidth_update(rdev);
275
 
276
	rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
277
 
2997 Serge 278
	mutex_unlock(&rdev->ring_lock);
279
//   up_write(&rdev->pm.mclk_lock);
1963 serge 280
	mutex_unlock(&rdev->ddev->struct_mutex);
1430 serge 281
}
282
 
1963 serge 283
static void radeon_pm_print_states(struct radeon_device *rdev)
1430 serge 284
{
1963 serge 285
	int i, j;
286
	struct radeon_power_state *power_state;
287
	struct radeon_pm_clock_info *clock_info;
288
 
289
	DRM_DEBUG_DRIVER("%d Power State(s)\n", rdev->pm.num_power_states);
290
	for (i = 0; i < rdev->pm.num_power_states; i++) {
291
		power_state = &rdev->pm.power_state[i];
292
		DRM_DEBUG_DRIVER("State %d: %s\n", i,
293
			radeon_pm_state_type_name[power_state->type]);
294
		if (i == rdev->pm.default_power_state_index)
295
			DRM_DEBUG_DRIVER("\tDefault");
296
		if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP))
297
			DRM_DEBUG_DRIVER("\t%d PCIE Lanes\n", power_state->pcie_lanes);
298
		if (power_state->flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
299
			DRM_DEBUG_DRIVER("\tSingle display only\n");
300
		DRM_DEBUG_DRIVER("\t%d Clock Mode(s)\n", power_state->num_clock_modes);
301
		for (j = 0; j < power_state->num_clock_modes; j++) {
302
			clock_info = &(power_state->clock_info[j]);
303
			if (rdev->flags & RADEON_IS_IGP)
2997 Serge 304
				DRM_DEBUG_DRIVER("\t\t%d e: %d\n",
1963 serge 305
					j,
2997 Serge 306
						 clock_info->sclk * 10);
1963 serge 307
			else
2997 Serge 308
				DRM_DEBUG_DRIVER("\t\t%d e: %d\tm: %d\tv: %d\n",
1963 serge 309
					j,
310
					clock_info->sclk * 10,
311
					clock_info->mclk * 10,
2997 Serge 312
						 clock_info->voltage.voltage);
1963 serge 313
		}
1430 serge 314
	}
1963 serge 315
}
1430 serge 316
 
1963 serge 317
static ssize_t radeon_get_pm_profile(struct device *dev,
318
				     struct device_attribute *attr,
319
				     char *buf)
320
{
2997 Serge 321
	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
322
	struct radeon_device *rdev = ddev->dev_private;
323
	int cp = rdev->pm.profile;
1963 serge 324
 
2997 Serge 325
	return snprintf(buf, PAGE_SIZE, "%s\n",
326
			(cp == PM_PROFILE_AUTO) ? "auto" :
327
			(cp == PM_PROFILE_LOW) ? "low" :
328
			(cp == PM_PROFILE_MID) ? "mid" :
329
			(cp == PM_PROFILE_HIGH) ? "high" : "default");
1430 serge 330
}
331
 
1963 serge 332
static ssize_t radeon_set_pm_profile(struct device *dev,
333
				     struct device_attribute *attr,
334
				     const char *buf,
335
				     size_t count)
1430 serge 336
{
1963 serge 337
	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
338
	struct radeon_device *rdev = ddev->dev_private;
339
 
340
	mutex_lock(&rdev->pm.mutex);
2997 Serge 341
	if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
342
		if (strncmp("default", buf, strlen("default")) == 0)
1963 serge 343
    rdev->pm.profile = PM_PROFILE_DEFAULT;
2997 Serge 344
		else if (strncmp("auto", buf, strlen("auto")) == 0)
345
			rdev->pm.profile = PM_PROFILE_AUTO;
346
		else if (strncmp("low", buf, strlen("low")) == 0)
347
			rdev->pm.profile = PM_PROFILE_LOW;
348
		else if (strncmp("mid", buf, strlen("mid")) == 0)
349
			rdev->pm.profile = PM_PROFILE_MID;
350
		else if (strncmp("high", buf, strlen("high")) == 0)
351
			rdev->pm.profile = PM_PROFILE_HIGH;
352
		else {
353
			count = -EINVAL;
354
			goto fail;
355
		}
1963 serge 356
    radeon_pm_update_profile(rdev);
357
    radeon_pm_set_clocks(rdev);
2997 Serge 358
	} else
359
		count = -EINVAL;
360
 
1963 serge 361
fail:
362
	mutex_unlock(&rdev->pm.mutex);
363
 
364
	return count;
365
}
366
 
367
static ssize_t radeon_get_pm_method(struct device *dev,
368
				    struct device_attribute *attr,
369
				    char *buf)
370
{
371
	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
372
	struct radeon_device *rdev = ddev->dev_private;
373
	int pm = rdev->pm.pm_method;
374
 
375
	return snprintf(buf, PAGE_SIZE, "%s\n",
376
			(pm == PM_METHOD_DYNPM) ? "dynpm" : "profile");
377
}
378
 
379
static ssize_t radeon_set_pm_method(struct device *dev,
380
				    struct device_attribute *attr,
381
				    const char *buf,
382
				    size_t count)
383
{
384
	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
385
	struct radeon_device *rdev = ddev->dev_private;
386
 
387
 
388
	if (strncmp("dynpm", buf, strlen("dynpm")) == 0) {
389
		mutex_lock(&rdev->pm.mutex);
390
		rdev->pm.pm_method = PM_METHOD_DYNPM;
391
		rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
392
		rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
393
		mutex_unlock(&rdev->pm.mutex);
394
	} else if (strncmp("profile", buf, strlen("profile")) == 0) {
395
		mutex_lock(&rdev->pm.mutex);
396
		/* disable dynpm */
397
		rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
398
		rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
399
		rdev->pm.pm_method = PM_METHOD_PROFILE;
400
		mutex_unlock(&rdev->pm.mutex);
401
//		cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
402
	} else {
2997 Serge 403
		count = -EINVAL;
1963 serge 404
		goto fail;
405
	}
406
	radeon_pm_compute_clocks(rdev);
407
fail:
408
	return count;
409
}
410
 
2997 Serge 411
//static DEVICE_ATTR(power_profile, S_IRUGO | S_IWUSR, radeon_get_pm_profile, radeon_set_pm_profile);
412
//static DEVICE_ATTR(power_method, S_IRUGO | S_IWUSR, radeon_get_pm_method, radeon_set_pm_method);
413
 
1963 serge 414
static ssize_t radeon_hwmon_show_temp(struct device *dev,
415
				      struct device_attribute *attr,
416
				      char *buf)
417
{
418
	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
419
	struct radeon_device *rdev = ddev->dev_private;
2997 Serge 420
	int temp;
1963 serge 421
 
422
	switch (rdev->pm.int_thermal_type) {
423
	case THERMAL_TYPE_RV6XX:
424
		temp = rv6xx_get_temp(rdev);
1430 serge 425
		break;
1963 serge 426
	case THERMAL_TYPE_RV770:
427
		temp = rv770_get_temp(rdev);
1430 serge 428
		break;
1963 serge 429
	case THERMAL_TYPE_EVERGREEN:
430
	case THERMAL_TYPE_NI:
431
		temp = evergreen_get_temp(rdev);
1430 serge 432
		break;
2997 Serge 433
	case THERMAL_TYPE_SUMO:
434
		temp = sumo_get_temp(rdev);
435
		break;
436
	case THERMAL_TYPE_SI:
437
		temp = si_get_temp(rdev);
438
		break;
1430 serge 439
	default:
1963 serge 440
		temp = 0;
441
		break;
1430 serge 442
	}
1963 serge 443
 
444
	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
1430 serge 445
}
446
 
1963 serge 447
/* hwmon show: the fixed device name exposed to userspace. */
static ssize_t radeon_hwmon_show_name(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	return sprintf(buf, "radeon\n");
}
1430 serge 453
 
1963 serge 454
static int radeon_hwmon_init(struct radeon_device *rdev)
455
{
456
	int err = 0;
1430 serge 457
 
1963 serge 458
	rdev->pm.int_hwmon_dev = NULL;
459
 
460
	return err;
1430 serge 461
}
462
 
1963 serge 463
/* Stub: nothing was registered in radeon_hwmon_init, nothing to tear down. */
static void radeon_hwmon_fini(struct radeon_device *rdev)
{
}
466
 
467
void radeon_pm_suspend(struct radeon_device *rdev)
468
{
469
	mutex_lock(&rdev->pm.mutex);
470
	if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
471
		if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE)
472
			rdev->pm.dynpm_state = DYNPM_STATE_SUSPENDED;
473
	}
474
	mutex_unlock(&rdev->pm.mutex);
475
 
476
//	cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
477
}
478
 
479
void radeon_pm_resume(struct radeon_device *rdev)
480
{
481
	/* asic init will reset the default power state */
482
	mutex_lock(&rdev->pm.mutex);
483
	rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
484
	rdev->pm.current_clock_mode_index = 0;
485
	rdev->pm.current_sclk = rdev->pm.default_sclk;
486
	rdev->pm.current_mclk = rdev->pm.default_mclk;
487
	rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
488
	if (rdev->pm.pm_method == PM_METHOD_DYNPM
489
	    && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) {
490
		rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
491
//		schedule_delayed_work(&rdev->pm.dynpm_idle_work,
492
//					msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
493
	}
494
	mutex_unlock(&rdev->pm.mutex);
495
	radeon_pm_compute_clocks(rdev);
496
}
497
 
1268 serge 498
int radeon_pm_init(struct radeon_device *rdev)
499
{
1963 serge 500
	int ret;
1430 serge 501
 
1963 serge 502
	/* default to profile method */
503
	rdev->pm.pm_method = PM_METHOD_PROFILE;
504
	rdev->pm.profile = PM_PROFILE_DEFAULT;
505
	rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
506
	rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
507
	rdev->pm.dynpm_can_upclock = true;
508
	rdev->pm.dynpm_can_downclock = true;
509
	rdev->pm.default_sclk = rdev->clock.default_sclk;
510
	rdev->pm.default_mclk = rdev->clock.default_mclk;
511
	rdev->pm.current_sclk = rdev->clock.default_sclk;
512
	rdev->pm.current_mclk = rdev->clock.default_mclk;
513
	rdev->pm.int_thermal_type = THERMAL_TYPE_NONE;
514
 
1430 serge 515
	if (rdev->bios) {
516
		if (rdev->is_atom_bios)
517
			radeon_atombios_get_power_modes(rdev);
518
		else
519
			radeon_combios_get_power_modes(rdev);
1963 serge 520
		radeon_pm_print_states(rdev);
521
		radeon_pm_init_profile(rdev);
1430 serge 522
	}
523
 
1963 serge 524
	/* set up the internal thermal sensor if applicable */
525
	ret = radeon_hwmon_init(rdev);
526
	if (ret)
527
		return ret;
1268 serge 528
 
1963 serge 529
	if (rdev->pm.num_power_states > 1) {
1430 serge 530
 
1963 serge 531
		DRM_INFO("radeon: power management initialized\n");
1430 serge 532
	}
533
 
1268 serge 534
	return 0;
535
}
536
 
1963 serge 537
void radeon_pm_fini(struct radeon_device *rdev)
538
{
539
	if (rdev->pm.num_power_states > 1) {
540
		mutex_lock(&rdev->pm.mutex);
541
		if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
542
			rdev->pm.profile = PM_PROFILE_DEFAULT;
543
			radeon_pm_update_profile(rdev);
544
			radeon_pm_set_clocks(rdev);
545
		} else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
546
			/* reset default clocks */
547
			rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
548
			rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
549
			radeon_pm_set_clocks(rdev);
550
		}
551
		mutex_unlock(&rdev->pm.mutex);
552
 
553
//		cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
554
 
555
    }
556
 
557
	radeon_hwmon_fini(rdev);
558
}
559
 
1430 serge 560
void radeon_pm_compute_clocks(struct radeon_device *rdev)
561
{
562
	struct drm_device *ddev = rdev->ddev;
1963 serge 563
	struct drm_crtc *crtc;
1430 serge 564
	struct radeon_crtc *radeon_crtc;
565
 
1963 serge 566
	if (rdev->pm.num_power_states < 2)
1430 serge 567
		return;
568
 
569
	mutex_lock(&rdev->pm.mutex);
570
 
571
	rdev->pm.active_crtcs = 0;
1963 serge 572
	rdev->pm.active_crtc_count = 0;
573
	list_for_each_entry(crtc,
574
		&ddev->mode_config.crtc_list, head) {
575
		radeon_crtc = to_radeon_crtc(crtc);
576
		if (radeon_crtc->enabled) {
1430 serge 577
			rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
1963 serge 578
			rdev->pm.active_crtc_count++;
1430 serge 579
		}
580
	}
581
 
1963 serge 582
	if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
583
		radeon_pm_update_profile(rdev);
584
		radeon_pm_set_clocks(rdev);
585
	} else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
586
		if (rdev->pm.dynpm_state != DYNPM_STATE_DISABLED) {
587
			if (rdev->pm.active_crtc_count > 1) {
588
				if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
589
//                   cancel_delayed_work(&rdev->pm.dynpm_idle_work);
1430 serge 590
 
1963 serge 591
					rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
592
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
593
					radeon_pm_get_dynpm_state(rdev);
1430 serge 594
				radeon_pm_set_clocks(rdev);
595
 
1963 serge 596
					DRM_DEBUG_DRIVER("radeon: dynamic power management deactivated\n");
1430 serge 597
		}
1963 serge 598
			} else if (rdev->pm.active_crtc_count == 1) {
1430 serge 599
		/* TODO: Increase clocks if needed for current mode */
600
 
1963 serge 601
				if (rdev->pm.dynpm_state == DYNPM_STATE_MINIMUM) {
602
					rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
603
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_UPCLOCK;
604
					radeon_pm_get_dynpm_state(rdev);
1430 serge 605
			radeon_pm_set_clocks(rdev);
1963 serge 606
 
607
//					schedule_delayed_work(&rdev->pm.dynpm_idle_work,
608
//							   msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
609
				} else if (rdev->pm.dynpm_state == DYNPM_STATE_PAUSED) {
610
					rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
611
//					schedule_delayed_work(&rdev->pm.dynpm_idle_work,
612
//							   msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
613
					DRM_DEBUG_DRIVER("radeon: dynamic power management activated\n");
1430 serge 614
        }
1963 serge 615
			} else { /* count == 0 */
616
				if (rdev->pm.dynpm_state != DYNPM_STATE_MINIMUM) {
617
//					cancel_delayed_work(&rdev->pm.dynpm_idle_work);
618
 
619
					rdev->pm.dynpm_state = DYNPM_STATE_MINIMUM;
620
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_MINIMUM;
621
					radeon_pm_get_dynpm_state(rdev);
622
					radeon_pm_set_clocks(rdev);
1430 serge 623
		}
624
	}
625
		}
626
	}
627
 
628
	mutex_unlock(&rdev->pm.mutex);
629
}
630
 
1963 serge 631
static bool radeon_pm_in_vbl(struct radeon_device *rdev)
1430 serge 632
{
1963 serge 633
	int  crtc, vpos, hpos, vbl_status;
1430 serge 634
	bool in_vbl = true;
635
 
1963 serge 636
	/* Iterate over all active crtc's. All crtc's must be in vblank,
637
	 * otherwise return in_vbl == false.
638
	 */
639
	for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) {
640
		if (rdev->pm.active_crtcs & (1 << crtc)) {
641
			vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, &vpos, &hpos);
642
			if ((vbl_status & DRM_SCANOUTPOS_VALID) &&
643
			    !(vbl_status & DRM_SCANOUTPOS_INVBL))
1430 serge 644
				in_vbl = false;
645
		}
646
		}
1963 serge 647
 
1430 serge 648
	return in_vbl;
649
}
650
 
1963 serge 651
/*
 * Debug helper wrapping radeon_pm_in_vbl: logs when a reclock entry/exit
 * happens outside vblank. stat_crtc is always 0 here (diagnostic only).
 */
static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish)
{
	u32 stat_crtc = 0;
	bool in_vbl = radeon_pm_in_vbl(rdev);

	if (!in_vbl)
		DRM_DEBUG_DRIVER("not in vbl for pm change %08x at %s\n", stat_crtc,
			 finish ? "exit" : "entry");
	return in_vbl;
}
661
 
662
 
1268 serge 663
/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

/* Print current/default clocks, voltage and PCIE lane count. Clocks are
 * stored in 10 kHz units, hence the "%u0 kHz" trick in the format. */
static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk);
	seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
	seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk);
	if (rdev->asic->get_memory_clock)
		seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
	if (rdev->pm.current_vddc)
		seq_printf(m, "voltage: %u mV\n", rdev->pm.current_vddc);
	if (rdev->asic->get_pcie_lanes)
		seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev));

	return 0;
}

static struct drm_info_list radeon_pm_info_list[] = {
	{"radeon_pm_info", radeon_debugfs_pm_info, 0, NULL},
};
#endif
691
 
1430 serge 692
/* Register the PM debugfs entries; a no-op when debugfs is compiled out. */
static int radeon_debugfs_pm_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_pm_info_list, ARRAY_SIZE(radeon_pm_info_list));
#else
	return 0;
#endif
}