/*
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Rafał Miłecki
 *          Alex Deucher
 */
#include "drmP.h"
#include "radeon.h"
#include "avivod.h"

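/* In this KolibriOS port the DRM debug macro is stubbed out to a no-op, so
 * every DRM_DEBUG_DRIVER() call in this file compiles away. */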
#define DRM_DEBUG_DRIVER(fmt, args...)

#define RADEON_IDLE_LOOP_MS 100
#define RADEON_RECLOCK_DELAY_MS 200
#define RADEON_WAIT_VBLANK_TIMEOUT 200
#define RADEON_WAIT_IDLE_TIMEOUT 200

static const char *radeon_pm_state_type_name[5] = {
	"Default",
	"Powersave",
	"Battery",
	"Balanced",
	"Performance",
};

static void radeon_dynpm_idle_work_handler(struct work_struct *work);
static int radeon_debugfs_pm_init(struct radeon_device *rdev);
static bool radeon_pm_in_vbl(struct radeon_device *rdev);
static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish);
static void radeon_pm_update_profile(struct radeon_device *rdev);
static void radeon_pm_set_clocks(struct radeon_device *rdev);

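/* The Linux power-supply framework is not available here; returning -ENOSYS
 * (never > 0) makes PM_PROFILE_AUTO below fall back to the mid profiles, as
 * if the system were running on battery. */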
static inline int power_supply_is_system_supplied(void) { return -ENOSYS; }

#define ACPI_AC_CLASS           "ac_adapter"

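/* ACPI notifier: on an AC-adapter plug/unplug event, re-evaluate the
 * automatic profile and reprogram the clocks. */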
#ifdef CONFIG_ACPI
static int radeon_acpi_event(struct notifier_block *nb,
			     unsigned long val,
			     void *data)
{
	struct radeon_device *rdev = container_of(nb, struct radeon_device, acpi_nb);
	struct acpi_bus_event *entry = (struct acpi_bus_event *)data;

	if (strcmp(entry->device_class, ACPI_AC_CLASS) == 0) {
		if (power_supply_is_system_supplied() > 0)
			DRM_DEBUG_DRIVER("pm: AC\n");
		else
			DRM_DEBUG_DRIVER("pm: DC\n");

		if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
			if (rdev->pm.profile == PM_PROFILE_AUTO) {
				mutex_lock(&rdev->pm.mutex);
				radeon_pm_update_profile(rdev);
				radeon_pm_set_clocks(rdev);
				mutex_unlock(&rdev->pm.mutex);
			}
		}
	}

	return NOTIFY_OK;
}
#endif

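/* Map the selected profile, AC/DC state and number of active CRTCs to a
 * profile index, then latch the requested power state and clock mode
 * (dpms-on vs. dpms-off variant) for that profile. */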
static void radeon_pm_update_profile(struct radeon_device *rdev)
{
	switch (rdev->pm.profile) {
	case PM_PROFILE_DEFAULT:
		rdev->pm.profile_index = PM_PROFILE_DEFAULT_IDX;
		break;
	case PM_PROFILE_AUTO:
		if (power_supply_is_system_supplied() > 0) {
			if (rdev->pm.active_crtc_count > 1)
				rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
			else
				rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
		} else {
			if (rdev->pm.active_crtc_count > 1)
				rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
			else
				rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
		}
		break;
	case PM_PROFILE_LOW:
		if (rdev->pm.active_crtc_count > 1)
			rdev->pm.profile_index = PM_PROFILE_LOW_MH_IDX;
		else
			rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX;
		break;
	case PM_PROFILE_MID:
		if (rdev->pm.active_crtc_count > 1)
			rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
		else
			rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
		break;
	case PM_PROFILE_HIGH:
		if (rdev->pm.active_crtc_count > 1)
			rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
		else
			rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
		break;
	}

	if (rdev->pm.active_crtc_count == 0) {
		rdev->pm.requested_power_state_index =
			rdev->pm.profiles[rdev->pm.profile_index].dpms_off_ps_idx;
		rdev->pm.requested_clock_mode_index =
			rdev->pm.profiles[rdev->pm.profile_index].dpms_off_cm_idx;
	} else {
		rdev->pm.requested_power_state_index =
			rdev->pm.profiles[rdev->pm.profile_index].dpms_on_ps_idx;
		rdev->pm.requested_clock_mode_index =
			rdev->pm.profiles[rdev->pm.profile_index].dpms_on_cm_idx;
	}
}

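/* Stub: the buffer-object unmap loop of the upstream driver has been removed
 * in this port, so this only checks the GEM object list and returns. */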
static void radeon_unmap_vram_bos(struct radeon_device *rdev)
{
	struct radeon_bo *bo, *n;

	if (list_empty(&rdev->gem.objects))
		return;
}

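/* Program the requested engine/memory clocks (clamped to the defaults) once
 * the GUI is idle; voltage and PCIe lane changes are applied before an
 * upclock and after a downclock. */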
static void radeon_set_power_state(struct radeon_device *rdev)
{
	u32 sclk, mclk;
	bool misc_after = false;

	if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
	    (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
		return;

	if (radeon_gui_idle(rdev)) {
		sclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
			clock_info[rdev->pm.requested_clock_mode_index].sclk;
		if (sclk > rdev->pm.default_sclk)
			sclk = rdev->pm.default_sclk;

		mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
			clock_info[rdev->pm.requested_clock_mode_index].mclk;
		if (mclk > rdev->pm.default_mclk)
			mclk = rdev->pm.default_mclk;

		/* upvolt before raising clocks, downvolt after lowering clocks */
		if (sclk < rdev->pm.current_sclk)
			misc_after = true;

//		radeon_sync_with_vblank(rdev);

		if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
			if (!radeon_pm_in_vbl(rdev))
				return;
		}

		radeon_pm_prepare(rdev);

		if (!misc_after)
			/* voltage, pcie lanes, etc.*/
			radeon_pm_misc(rdev);

		/* set engine clock */
		if (sclk != rdev->pm.current_sclk) {
			radeon_pm_debug_check_in_vbl(rdev, false);
			radeon_set_engine_clock(rdev, sclk);
			radeon_pm_debug_check_in_vbl(rdev, true);
			rdev->pm.current_sclk = sclk;
			DRM_DEBUG_DRIVER("Setting: e: %d\n", sclk);
		}

		/* set memory clock */
		if (rdev->asic->set_memory_clock && (mclk != rdev->pm.current_mclk)) {
			radeon_pm_debug_check_in_vbl(rdev, false);
			radeon_set_memory_clock(rdev, mclk);
			radeon_pm_debug_check_in_vbl(rdev, true);
			rdev->pm.current_mclk = mclk;
			DRM_DEBUG_DRIVER("Setting: m: %d\n", mclk);
		}

		if (misc_after)
			/* voltage, pcie lanes, etc.*/
			radeon_pm_misc(rdev);

		radeon_pm_finish(rdev);

		rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index;
		rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index;
	} else
		DRM_DEBUG_DRIVER("pm: GUI not idle!!!\n");
}

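/* Take the display, VRAM and CP locks, switch to the requested power state
 * and refresh the display watermarks for the new clocks. Most of the idle
 * and vblank synchronisation of the upstream driver is commented out in
 * this port. */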
static void radeon_pm_set_clocks(struct radeon_device *rdev)
{
	int i;

	/* no need to take locks, etc. if nothing's going to change */
	if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
	    (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
		return;

	mutex_lock(&rdev->ddev->struct_mutex);
	mutex_lock(&rdev->vram_mutex);
	mutex_lock(&rdev->cp.mutex);

	/* gui idle int has issues on older chips it seems */
	if (rdev->family >= CHIP_R600) {
		if (rdev->irq.installed) {
			/* wait for GPU idle */
			rdev->pm.gui_idle = false;
			rdev->irq.gui_idle = true;
		}
	} else {
		if (rdev->cp.ready) {
//			struct radeon_fence *fence;
//			radeon_ring_alloc(rdev, 64);
//			radeon_fence_create(rdev, &fence);
//			radeon_fence_emit(rdev, fence);
//			radeon_ring_commit(rdev);
//			radeon_fence_wait(fence, false);
//			radeon_fence_unref(&fence);
		}
	}
	radeon_unmap_vram_bos(rdev);

	if (rdev->irq.installed) {
		for (i = 0; i < rdev->num_crtc; i++) {
			if (rdev->pm.active_crtcs & (1 << i)) {
				rdev->pm.req_vblank |= (1 << i);
//				drm_vblank_get(rdev->ddev, i);
			}
		}
	}

	radeon_set_power_state(rdev);

	if (rdev->irq.installed) {
		for (i = 0; i < rdev->num_crtc; i++) {
			if (rdev->pm.req_vblank & (1 << i)) {
				rdev->pm.req_vblank &= ~(1 << i);
//				drm_vblank_put(rdev->ddev, i);
			}
		}
	}

	/* update display watermarks based on new power state */
	radeon_update_bandwidth_info(rdev);
	if (rdev->pm.active_crtc_count)
		radeon_bandwidth_update(rdev);

	rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;

	mutex_unlock(&rdev->cp.mutex);
	mutex_unlock(&rdev->vram_mutex);
	mutex_unlock(&rdev->ddev->struct_mutex);
}

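/* Dump the BIOS-reported power states and their clock modes through the
 * (currently stubbed) debug macro. */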
static void radeon_pm_print_states(struct radeon_device *rdev)
{
	int i, j;
	struct radeon_power_state *power_state;
	struct radeon_pm_clock_info *clock_info;

	DRM_DEBUG_DRIVER("%d Power State(s)\n", rdev->pm.num_power_states);
	for (i = 0; i < rdev->pm.num_power_states; i++) {
		power_state = &rdev->pm.power_state[i];
		DRM_DEBUG_DRIVER("State %d: %s\n", i,
			radeon_pm_state_type_name[power_state->type]);
		if (i == rdev->pm.default_power_state_index)
			DRM_DEBUG_DRIVER("\tDefault");
		if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP))
			DRM_DEBUG_DRIVER("\t%d PCIE Lanes\n", power_state->pcie_lanes);
		if (power_state->flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
			DRM_DEBUG_DRIVER("\tSingle display only\n");
		DRM_DEBUG_DRIVER("\t%d Clock Mode(s)\n", power_state->num_clock_modes);
		for (j = 0; j < power_state->num_clock_modes; j++) {
			clock_info = &(power_state->clock_info[j]);
			if (rdev->flags & RADEON_IS_IGP)
				DRM_DEBUG_DRIVER("\t\t%d e: %d%s\n",
					j,
					clock_info->sclk * 10,
					clock_info->flags & RADEON_PM_MODE_NO_DISPLAY ? "\tNo display only" : "");
			else
				DRM_DEBUG_DRIVER("\t\t%d e: %d\tm: %d\tv: %d%s\n",
					j,
					clock_info->sclk * 10,
					clock_info->mclk * 10,
					clock_info->voltage.voltage,
					clock_info->flags & RADEON_PM_MODE_NO_DISPLAY ? "\tNo display only" : "");
		}
	}
}

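/* sysfs power_profile attribute. The store side is simplified in this port:
 * whatever string is written, PM_PROFILE_DEFAULT is selected. */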
static ssize_t radeon_get_pm_profile(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%s\n", "default");
}

static ssize_t radeon_set_pm_profile(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf,
				     size_t count)
{
	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
	struct radeon_device *rdev = ddev->dev_private;

	mutex_lock(&rdev->pm.mutex);

	rdev->pm.profile = PM_PROFILE_DEFAULT;

	radeon_pm_update_profile(rdev);
	radeon_pm_set_clocks(rdev);
fail:
	mutex_unlock(&rdev->pm.mutex);

	return count;
}

static ssize_t radeon_get_pm_method(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
	struct radeon_device *rdev = ddev->dev_private;
	int pm = rdev->pm.pm_method;

	return snprintf(buf, PAGE_SIZE, "%s\n",
			(pm == PM_METHOD_DYNPM) ? "dynpm" : "profile");
}

static ssize_t radeon_set_pm_method(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t count)
{
	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
	struct radeon_device *rdev = ddev->dev_private;

	if (strncmp("dynpm", buf, strlen("dynpm")) == 0) {
		mutex_lock(&rdev->pm.mutex);
		rdev->pm.pm_method = PM_METHOD_DYNPM;
		rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
		rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
		mutex_unlock(&rdev->pm.mutex);
	} else if (strncmp("profile", buf, strlen("profile")) == 0) {
		mutex_lock(&rdev->pm.mutex);
		/* disable dynpm */
		rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
		rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
		rdev->pm.pm_method = PM_METHOD_PROFILE;
		mutex_unlock(&rdev->pm.mutex);
//		cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
	} else {
		DRM_ERROR("invalid power method!\n");
		goto fail;
	}
	radeon_pm_compute_clocks(rdev);
fail:
	return count;
}

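/* hwmon temp1_input: read the internal thermal sensor for the given ASIC
 * family, or report 0 when no sensor is supported. */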
static ssize_t radeon_hwmon_show_temp(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
	struct radeon_device *rdev = ddev->dev_private;
	u32 temp;

	switch (rdev->pm.int_thermal_type) {
	case THERMAL_TYPE_RV6XX:
		temp = rv6xx_get_temp(rdev);
		break;
	case THERMAL_TYPE_RV770:
		temp = rv770_get_temp(rdev);
		break;
	case THERMAL_TYPE_EVERGREEN:
	case THERMAL_TYPE_NI:
		temp = evergreen_get_temp(rdev);
		break;
	default:
		temp = 0;
		break;
	}

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}

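/* hwmon name attribute plus init/fini stubs: no hwmon device is registered
 * in this port, so int_hwmon_dev stays NULL. */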
static ssize_t radeon_hwmon_show_name(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	return sprintf(buf, "radeon\n");
}

static int radeon_hwmon_init(struct radeon_device *rdev)
{
	int err = 0;

	rdev->pm.int_hwmon_dev = NULL;

	return err;
}

static void radeon_hwmon_fini(struct radeon_device *rdev)
{
}

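/* Suspend/resume hooks: pause dynpm across suspend and restore the default
 * power state, clocks and voltage after ASIC re-init on resume. */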
void radeon_pm_suspend(struct radeon_device *rdev)
{
	mutex_lock(&rdev->pm.mutex);
	if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
		if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE)
			rdev->pm.dynpm_state = DYNPM_STATE_SUSPENDED;
	}
	mutex_unlock(&rdev->pm.mutex);

//	cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
}

void radeon_pm_resume(struct radeon_device *rdev)
{
	/* asic init will reset the default power state */
	mutex_lock(&rdev->pm.mutex);
	rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
	rdev->pm.current_clock_mode_index = 0;
	rdev->pm.current_sclk = rdev->pm.default_sclk;
	rdev->pm.current_mclk = rdev->pm.default_mclk;
	rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
	if (rdev->pm.pm_method == PM_METHOD_DYNPM
	    && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) {
		rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
//		schedule_delayed_work(&rdev->pm.dynpm_idle_work,
//					msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
	}
	mutex_unlock(&rdev->pm.mutex);
	radeon_pm_compute_clocks(rdev);
}

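/* One-time PM setup: default to the profile method, read the power states
 * from the video BIOS and build the profile table. The sysfs attributes and
 * the dynpm idle work used by the upstream driver are not set up here. */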
int radeon_pm_init(struct radeon_device *rdev)
{
	int ret;

	/* default to profile method */
	rdev->pm.pm_method = PM_METHOD_PROFILE;
	rdev->pm.profile = PM_PROFILE_DEFAULT;
	rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
	rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
	rdev->pm.dynpm_can_upclock = true;
	rdev->pm.dynpm_can_downclock = true;
	rdev->pm.default_sclk = rdev->clock.default_sclk;
	rdev->pm.default_mclk = rdev->clock.default_mclk;
	rdev->pm.current_sclk = rdev->clock.default_sclk;
	rdev->pm.current_mclk = rdev->clock.default_mclk;
	rdev->pm.int_thermal_type = THERMAL_TYPE_NONE;

	if (rdev->bios) {
		if (rdev->is_atom_bios)
			radeon_atombios_get_power_modes(rdev);
		else
			radeon_combios_get_power_modes(rdev);
		radeon_pm_print_states(rdev);
		radeon_pm_init_profile(rdev);
	}

	/* set up the internal thermal sensor if applicable */
	ret = radeon_hwmon_init(rdev);
	if (ret)
		return ret;

	if (rdev->pm.num_power_states > 1) {
		DRM_INFO("radeon: power management initialized\n");
	}

	return 0;
}

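/* Teardown: restore the default profile or clocks before shutdown and
 * release the (stubbed) hwmon resources. */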
void radeon_pm_fini(struct radeon_device *rdev)
{
	if (rdev->pm.num_power_states > 1) {
		mutex_lock(&rdev->pm.mutex);
		if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
			rdev->pm.profile = PM_PROFILE_DEFAULT;
			radeon_pm_update_profile(rdev);
			radeon_pm_set_clocks(rdev);
		} else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
			/* reset default clocks */
			rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
			rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
			radeon_pm_set_clocks(rdev);
		}
		mutex_unlock(&rdev->pm.mutex);

//		cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
	}

	radeon_hwmon_fini(rdev);
}

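/* Re-evaluate power management whenever the display configuration changes:
 * recount the active CRTCs, then either reapply the current profile or drive
 * the dynpm state machine (pause with more than one head, run with one,
 * drop to minimum clocks with none). */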
void radeon_pm_compute_clocks(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;

	if (rdev->pm.num_power_states < 2)
		return;

	mutex_lock(&rdev->pm.mutex);

	rdev->pm.active_crtcs = 0;
	rdev->pm.active_crtc_count = 0;
	list_for_each_entry(crtc,
		&ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
			rdev->pm.active_crtc_count++;
		}
	}

	if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
		radeon_pm_update_profile(rdev);
		radeon_pm_set_clocks(rdev);
	} else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
		if (rdev->pm.dynpm_state != DYNPM_STATE_DISABLED) {
			if (rdev->pm.active_crtc_count > 1) {
				if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
//					cancel_delayed_work(&rdev->pm.dynpm_idle_work);

					rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
					radeon_pm_get_dynpm_state(rdev);
					radeon_pm_set_clocks(rdev);

					DRM_DEBUG_DRIVER("radeon: dynamic power management deactivated\n");
				}
			} else if (rdev->pm.active_crtc_count == 1) {
				/* TODO: Increase clocks if needed for current mode */

				if (rdev->pm.dynpm_state == DYNPM_STATE_MINIMUM) {
					rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_UPCLOCK;
					radeon_pm_get_dynpm_state(rdev);
					radeon_pm_set_clocks(rdev);

//					schedule_delayed_work(&rdev->pm.dynpm_idle_work,
//							   msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
				} else if (rdev->pm.dynpm_state == DYNPM_STATE_PAUSED) {
					rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
//					schedule_delayed_work(&rdev->pm.dynpm_idle_work,
//							   msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
					DRM_DEBUG_DRIVER("radeon: dynamic power management activated\n");
				}
			} else { /* count == 0 */
				if (rdev->pm.dynpm_state != DYNPM_STATE_MINIMUM) {
//					cancel_delayed_work(&rdev->pm.dynpm_idle_work);

					rdev->pm.dynpm_state = DYNPM_STATE_MINIMUM;
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_MINIMUM;
					radeon_pm_get_dynpm_state(rdev);
					radeon_pm_set_clocks(rdev);
				}
			}
		}
	}

	mutex_unlock(&rdev->pm.mutex);
}

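/* Reclocking is only safe while every active CRTC is inside its vertical
 * blanking interval; these helpers check the scanout position of each
 * active head. */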
static bool radeon_pm_in_vbl(struct radeon_device *rdev)
{
	int  crtc, vpos, hpos, vbl_status;
	bool in_vbl = true;

	/* Iterate over all active crtc's. All crtc's must be in vblank,
	 * otherwise return in_vbl == false.
	 */
	for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) {
		if (rdev->pm.active_crtcs & (1 << crtc)) {
			vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, &vpos, &hpos);
			if ((vbl_status & DRM_SCANOUTPOS_VALID) &&
			    !(vbl_status & DRM_SCANOUTPOS_INVBL))
				in_vbl = false;
		}
	}

	return in_vbl;
}

static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish)
{
	u32 stat_crtc = 0;
	bool in_vbl = radeon_pm_in_vbl(rdev);

	if (in_vbl == false)
		DRM_DEBUG_DRIVER("not in vbl for pm change %08x at %s\n", stat_crtc,
			 finish ? "exit" : "entry");
	return in_vbl;
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

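/* Clock values appear to be stored in 10 kHz units, hence the "%u0 kHz"
 * format below, which appends a trailing zero to print kHz. */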
static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk);
	seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
	seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk);
	if (rdev->asic->get_memory_clock)
		seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
	if (rdev->pm.current_vddc)
		seq_printf(m, "voltage: %u mV\n", rdev->pm.current_vddc);
	if (rdev->asic->get_pcie_lanes)
		seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev));

	return 0;
}

static struct drm_info_list radeon_pm_info_list[] = {
	{"radeon_pm_info", radeon_debugfs_pm_info, 0, NULL},
};
#endif

static int radeon_debugfs_pm_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_pm_info_list, ARRAY_SIZE(radeon_pm_info_list));
#else
	return 0;
#endif
}