Subversion Repositories Kolibri OS

Rev

Rev 1963 | Rev 2997 | Go to most recent revision | Details | Compare with Previous | Last modification | View Log | RSS feed

Rev Author Line No. Line
1268 serge 1
/*
2
 * Permission is hereby granted, free of charge, to any person obtaining a
3
 * copy of this software and associated documentation files (the "Software"),
4
 * to deal in the Software without restriction, including without limitation
5
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
6
 * and/or sell copies of the Software, and to permit persons to whom the
7
 * Software is furnished to do so, subject to the following conditions:
8
 *
9
 * The above copyright notice and this permission notice shall be included in
10
 * all copies or substantial portions of the Software.
11
 *
12
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
13
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
15
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
16
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
17
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
18
 * OTHER DEALINGS IN THE SOFTWARE.
19
 *
20
 * Authors: Rafał Miłecki 
1430 serge 21
 *          Alex Deucher 
1268 serge 22
 */
23
#include "drmP.h"
24
#include "radeon.h"
1430 serge 25
#include "avivod.h"
1986 serge 26
#include "atom.h"
1268 serge 27
 
1963 serge 28
#define DRM_DEBUG_DRIVER(fmt, args...)
29
 
1430 serge 30
#define RADEON_IDLE_LOOP_MS 100
31
#define RADEON_RECLOCK_DELAY_MS 200
32
#define RADEON_WAIT_VBLANK_TIMEOUT 200
1963 serge 33
#define RADEON_WAIT_IDLE_TIMEOUT 200
1268 serge 34
 
1963 serge 35
/* Human-readable names for the power-state types, indexed by
 * power_state->type (see radeon_pm_print_states()). */
static const char *radeon_pm_state_type_name[5] = {
	"Default",
	"Powersave",
	"Battery",
	"Balanced",
	"Performance",
};
42
 
1963 serge 43
static void radeon_dynpm_idle_work_handler(struct work_struct *work);
44
static int radeon_debugfs_pm_init(struct radeon_device *rdev);
45
static bool radeon_pm_in_vbl(struct radeon_device *rdev);
46
static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish);
47
static void radeon_pm_update_profile(struct radeon_device *rdev);
48
static void radeon_pm_set_clocks(struct radeon_device *rdev);
49
 
50
/* Stub for the Linux power-supply query: this port has no power-supply
 * subsystem, so always report -ENOSYS. Callers treat any value <= 0 as
 * "on battery/DC", which makes the AUTO profile pick the mid clocks. */
static inline int power_supply_is_system_supplied(void)
{
	return -ENOSYS;
}
51
 
52
#define ACPI_AC_CLASS           "ac_adapter"
53
 
54
#ifdef CONFIG_ACPI
/*
 * radeon_acpi_event - ACPI notifier callback.
 *
 * On an AC-adapter event, logs the new supply state and, when the
 * profile method with the AUTO profile is active, re-evaluates the
 * profile and reprograms the clocks under pm.mutex.
 */
static int radeon_acpi_event(struct notifier_block *nb,
			     unsigned long val,
			     void *data)
{
	struct radeon_device *rdev = container_of(nb, struct radeon_device, acpi_nb);
	struct acpi_bus_event *entry = (struct acpi_bus_event *)data;

	if (strcmp(entry->device_class, ACPI_AC_CLASS) == 0) {
		if (power_supply_is_system_supplied() > 0)
			DRM_DEBUG_DRIVER("pm: AC\n");
		else
			DRM_DEBUG_DRIVER("pm: DC\n");

		if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
			if (rdev->pm.profile == PM_PROFILE_AUTO) {
				mutex_lock(&rdev->pm.mutex);
				radeon_pm_update_profile(rdev);
				radeon_pm_set_clocks(rdev);
				mutex_unlock(&rdev->pm.mutex);
			}
		}
	}

	return NOTIFY_OK;
}
#endif
1430 serge 81
 
1963 serge 82
static void radeon_pm_update_profile(struct radeon_device *rdev)
1430 serge 83
{
1963 serge 84
	switch (rdev->pm.profile) {
85
	case PM_PROFILE_DEFAULT:
86
		rdev->pm.profile_index = PM_PROFILE_DEFAULT_IDX;
1430 serge 87
		break;
1963 serge 88
	case PM_PROFILE_AUTO:
89
		if (power_supply_is_system_supplied() > 0) {
90
			if (rdev->pm.active_crtc_count > 1)
91
				rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
92
			else
93
				rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
1430 serge 94
		} else {
1963 serge 95
			if (rdev->pm.active_crtc_count > 1)
96
				rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
97
			else
98
				rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
1430 serge 99
		}
100
		break;
1963 serge 101
	case PM_PROFILE_LOW:
102
		if (rdev->pm.active_crtc_count > 1)
103
			rdev->pm.profile_index = PM_PROFILE_LOW_MH_IDX;
104
		else
105
			rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX;
1430 serge 106
		break;
1963 serge 107
	case PM_PROFILE_MID:
108
		if (rdev->pm.active_crtc_count > 1)
109
			rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
110
		else
111
			rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
112
		break;
113
	case PM_PROFILE_HIGH:
114
		if (rdev->pm.active_crtc_count > 1)
115
			rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
116
		else
117
			rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
118
		break;
1430 serge 119
	}
120
 
1963 serge 121
	if (rdev->pm.active_crtc_count == 0) {
122
		rdev->pm.requested_power_state_index =
123
			rdev->pm.profiles[rdev->pm.profile_index].dpms_off_ps_idx;
124
		rdev->pm.requested_clock_mode_index =
125
			rdev->pm.profiles[rdev->pm.profile_index].dpms_off_cm_idx;
126
	} else {
127
		rdev->pm.requested_power_state_index =
128
			rdev->pm.profiles[rdev->pm.profile_index].dpms_on_ps_idx;
129
		rdev->pm.requested_clock_mode_index =
130
			rdev->pm.profiles[rdev->pm.profile_index].dpms_on_cm_idx;
1430 serge 131
		}
1963 serge 132
}
133
 
134
static void radeon_unmap_vram_bos(struct radeon_device *rdev)
135
{
136
	struct radeon_bo *bo, *n;
137
 
138
	if (list_empty(&rdev->gem.objects))
139
		return;
140
 
141
}
142
 
143
 
144
static void radeon_set_power_state(struct radeon_device *rdev)
145
{
146
	u32 sclk, mclk;
147
	bool misc_after = false;
148
 
149
	if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
150
	    (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
151
		return;
152
 
153
	if (radeon_gui_idle(rdev)) {
154
		sclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
155
			clock_info[rdev->pm.requested_clock_mode_index].sclk;
156
		if (sclk > rdev->pm.default_sclk)
157
			sclk = rdev->pm.default_sclk;
158
 
159
		mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
160
			clock_info[rdev->pm.requested_clock_mode_index].mclk;
161
		if (mclk > rdev->pm.default_mclk)
162
			mclk = rdev->pm.default_mclk;
163
 
164
		/* upvolt before raising clocks, downvolt after lowering clocks */
165
		if (sclk < rdev->pm.current_sclk)
166
			misc_after = true;
167
 
168
//       radeon_sync_with_vblank(rdev);
169
 
170
		if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
171
			if (!radeon_pm_in_vbl(rdev))
172
				return;
173
		}
174
 
175
		radeon_pm_prepare(rdev);
176
 
177
		if (!misc_after)
178
			/* voltage, pcie lanes, etc.*/
179
			radeon_pm_misc(rdev);
180
 
181
	/* set engine clock */
182
		if (sclk != rdev->pm.current_sclk) {
183
	radeon_pm_debug_check_in_vbl(rdev, false);
184
			radeon_set_engine_clock(rdev, sclk);
185
	radeon_pm_debug_check_in_vbl(rdev, true);
186
			rdev->pm.current_sclk = sclk;
187
			DRM_DEBUG_DRIVER("Setting: e: %d\n", sclk);
188
		}
189
 
190
	/* set memory clock */
191
		if (rdev->asic->set_memory_clock && (mclk != rdev->pm.current_mclk)) {
192
		radeon_pm_debug_check_in_vbl(rdev, false);
193
			radeon_set_memory_clock(rdev, mclk);
194
		radeon_pm_debug_check_in_vbl(rdev, true);
195
			rdev->pm.current_mclk = mclk;
196
			DRM_DEBUG_DRIVER("Setting: m: %d\n", mclk);
197
		}
198
 
199
		if (misc_after)
200
			/* voltage, pcie lanes, etc.*/
201
			radeon_pm_misc(rdev);
202
 
203
		radeon_pm_finish(rdev);
204
 
205
		rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index;
206
		rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index;
207
	} else
208
		DRM_DEBUG_DRIVER("pm: GUI not idle!!!\n");
209
}
210
 
211
static void radeon_pm_set_clocks(struct radeon_device *rdev)
212
{
213
	int i;
214
 
215
	/* no need to take locks, etc. if nothing's going to change */
216
	if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
217
	    (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
218
		return;
219
 
220
	mutex_lock(&rdev->ddev->struct_mutex);
221
	mutex_lock(&rdev->vram_mutex);
222
	mutex_lock(&rdev->cp.mutex);
223
 
224
	/* gui idle int has issues on older chips it seems */
225
	if (rdev->family >= CHIP_R600) {
226
		if (rdev->irq.installed) {
227
			/* wait for GPU idle */
228
			rdev->pm.gui_idle = false;
229
			rdev->irq.gui_idle = true;
230
        }
231
	} else {
232
		if (rdev->cp.ready) {
233
//           struct radeon_fence *fence;
234
//           radeon_ring_alloc(rdev, 64);
235
//           radeon_fence_create(rdev, &fence);
236
//           radeon_fence_emit(rdev, fence);
237
//           radeon_ring_commit(rdev);
238
//           radeon_fence_wait(fence, false);
239
//           radeon_fence_unref(&fence);
240
		}
1430 serge 241
	}
1963 serge 242
	radeon_unmap_vram_bos(rdev);
1430 serge 243
 
1963 serge 244
	if (rdev->irq.installed) {
245
		for (i = 0; i < rdev->num_crtc; i++) {
246
			if (rdev->pm.active_crtcs & (1 << i)) {
247
				rdev->pm.req_vblank |= (1 << i);
248
//               drm_vblank_get(rdev->ddev, i);
249
			}
250
		}
251
	}
252
 
253
	radeon_set_power_state(rdev);
254
 
255
	if (rdev->irq.installed) {
256
		for (i = 0; i < rdev->num_crtc; i++) {
257
			if (rdev->pm.req_vblank & (1 << i)) {
258
				rdev->pm.req_vblank &= ~(1 << i);
259
//               drm_vblank_put(rdev->ddev, i);
260
			}
261
		}
262
	}
263
 
264
	/* update display watermarks based on new power state */
265
	radeon_update_bandwidth_info(rdev);
266
	if (rdev->pm.active_crtc_count)
267
		radeon_bandwidth_update(rdev);
268
 
269
	rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
270
 
271
	mutex_unlock(&rdev->cp.mutex);
272
	mutex_unlock(&rdev->vram_mutex);
273
	mutex_unlock(&rdev->ddev->struct_mutex);
1430 serge 274
}
275
 
1963 serge 276
static void radeon_pm_print_states(struct radeon_device *rdev)
1430 serge 277
{
1963 serge 278
	int i, j;
279
	struct radeon_power_state *power_state;
280
	struct radeon_pm_clock_info *clock_info;
281
 
282
	DRM_DEBUG_DRIVER("%d Power State(s)\n", rdev->pm.num_power_states);
283
	for (i = 0; i < rdev->pm.num_power_states; i++) {
284
		power_state = &rdev->pm.power_state[i];
285
		DRM_DEBUG_DRIVER("State %d: %s\n", i,
286
			radeon_pm_state_type_name[power_state->type]);
287
		if (i == rdev->pm.default_power_state_index)
288
			DRM_DEBUG_DRIVER("\tDefault");
289
		if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP))
290
			DRM_DEBUG_DRIVER("\t%d PCIE Lanes\n", power_state->pcie_lanes);
291
		if (power_state->flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
292
			DRM_DEBUG_DRIVER("\tSingle display only\n");
293
		DRM_DEBUG_DRIVER("\t%d Clock Mode(s)\n", power_state->num_clock_modes);
294
		for (j = 0; j < power_state->num_clock_modes; j++) {
295
			clock_info = &(power_state->clock_info[j]);
296
			if (rdev->flags & RADEON_IS_IGP)
297
				DRM_DEBUG_DRIVER("\t\t%d e: %d%s\n",
298
					j,
299
					clock_info->sclk * 10,
300
					clock_info->flags & RADEON_PM_MODE_NO_DISPLAY ? "\tNo display only" : "");
301
			else
302
				DRM_DEBUG_DRIVER("\t\t%d e: %d\tm: %d\tv: %d%s\n",
303
					j,
304
					clock_info->sclk * 10,
305
					clock_info->mclk * 10,
306
					clock_info->voltage.voltage,
307
					clock_info->flags & RADEON_PM_MODE_NO_DISPLAY ? "\tNo display only" : "");
308
		}
1430 serge 309
	}
1963 serge 310
}
1430 serge 311
 
1963 serge 312
static ssize_t radeon_get_pm_profile(struct device *dev,
313
				     struct device_attribute *attr,
314
				     char *buf)
315
{
316
 
317
    return snprintf(buf, PAGE_SIZE, "%s\n", "default");
1430 serge 318
}
319
 
1963 serge 320
static ssize_t radeon_set_pm_profile(struct device *dev,
321
				     struct device_attribute *attr,
322
				     const char *buf,
323
				     size_t count)
1430 serge 324
{
1963 serge 325
	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
326
	struct radeon_device *rdev = ddev->dev_private;
327
 
328
	mutex_lock(&rdev->pm.mutex);
329
 
330
    rdev->pm.profile = PM_PROFILE_DEFAULT;
331
 
332
    radeon_pm_update_profile(rdev);
333
    radeon_pm_set_clocks(rdev);
334
fail:
335
	mutex_unlock(&rdev->pm.mutex);
336
 
337
	return count;
338
}
339
 
340
static ssize_t radeon_get_pm_method(struct device *dev,
341
				    struct device_attribute *attr,
342
				    char *buf)
343
{
344
	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
345
	struct radeon_device *rdev = ddev->dev_private;
346
	int pm = rdev->pm.pm_method;
347
 
348
	return snprintf(buf, PAGE_SIZE, "%s\n",
349
			(pm == PM_METHOD_DYNPM) ? "dynpm" : "profile");
350
}
351
 
352
static ssize_t radeon_set_pm_method(struct device *dev,
353
				    struct device_attribute *attr,
354
				    const char *buf,
355
				    size_t count)
356
{
357
	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
358
	struct radeon_device *rdev = ddev->dev_private;
359
 
360
 
361
	if (strncmp("dynpm", buf, strlen("dynpm")) == 0) {
362
		mutex_lock(&rdev->pm.mutex);
363
		rdev->pm.pm_method = PM_METHOD_DYNPM;
364
		rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
365
		rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
366
		mutex_unlock(&rdev->pm.mutex);
367
	} else if (strncmp("profile", buf, strlen("profile")) == 0) {
368
		mutex_lock(&rdev->pm.mutex);
369
		/* disable dynpm */
370
		rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
371
		rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
372
		rdev->pm.pm_method = PM_METHOD_PROFILE;
373
		mutex_unlock(&rdev->pm.mutex);
374
//		cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
375
	} else {
376
		DRM_ERROR("invalid power method!\n");
377
		goto fail;
378
	}
379
	radeon_pm_compute_clocks(rdev);
380
fail:
381
	return count;
382
}
383
 
384
static ssize_t radeon_hwmon_show_temp(struct device *dev,
385
				      struct device_attribute *attr,
386
				      char *buf)
387
{
388
	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
389
	struct radeon_device *rdev = ddev->dev_private;
390
	u32 temp;
391
 
392
	switch (rdev->pm.int_thermal_type) {
393
	case THERMAL_TYPE_RV6XX:
394
		temp = rv6xx_get_temp(rdev);
1430 serge 395
		break;
1963 serge 396
	case THERMAL_TYPE_RV770:
397
		temp = rv770_get_temp(rdev);
1430 serge 398
		break;
1963 serge 399
	case THERMAL_TYPE_EVERGREEN:
400
	case THERMAL_TYPE_NI:
401
		temp = evergreen_get_temp(rdev);
1430 serge 402
		break;
403
	default:
1963 serge 404
		temp = 0;
405
		break;
1430 serge 406
	}
1963 serge 407
 
408
	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
1430 serge 409
}
410
 
1963 serge 411
/* hwmon show: fixed device name. */
static ssize_t radeon_hwmon_show_name(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	static const char hwmon_name[] = "radeon\n";

	return sprintf(buf, "%s", hwmon_name);
}
1430 serge 417
 
1963 serge 418
static int radeon_hwmon_init(struct radeon_device *rdev)
419
{
420
	int err = 0;
1430 serge 421
 
1963 serge 422
	rdev->pm.int_hwmon_dev = NULL;
423
 
424
	return err;
1430 serge 425
}
426
 
1963 serge 427
/* Tear down hwmon state.  Nothing to do: radeon_hwmon_init() never
 * registers a device in this port. */
static void radeon_hwmon_fini(struct radeon_device *rdev)
{
}
430
 
431
void radeon_pm_suspend(struct radeon_device *rdev)
432
{
433
	mutex_lock(&rdev->pm.mutex);
434
	if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
435
		if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE)
436
			rdev->pm.dynpm_state = DYNPM_STATE_SUSPENDED;
437
	}
438
	mutex_unlock(&rdev->pm.mutex);
439
 
440
//	cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
441
}
442
 
443
void radeon_pm_resume(struct radeon_device *rdev)
444
{
445
	/* asic init will reset the default power state */
446
	mutex_lock(&rdev->pm.mutex);
447
	rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
448
	rdev->pm.current_clock_mode_index = 0;
449
	rdev->pm.current_sclk = rdev->pm.default_sclk;
450
	rdev->pm.current_mclk = rdev->pm.default_mclk;
451
	rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
452
	if (rdev->pm.pm_method == PM_METHOD_DYNPM
453
	    && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) {
454
		rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
455
//		schedule_delayed_work(&rdev->pm.dynpm_idle_work,
456
//					msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
457
	}
458
	mutex_unlock(&rdev->pm.mutex);
459
	radeon_pm_compute_clocks(rdev);
460
}
461
 
1268 serge 462
int radeon_pm_init(struct radeon_device *rdev)
463
{
1963 serge 464
	int ret;
1430 serge 465
 
1963 serge 466
	/* default to profile method */
467
	rdev->pm.pm_method = PM_METHOD_PROFILE;
468
	rdev->pm.profile = PM_PROFILE_DEFAULT;
469
	rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
470
	rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
471
	rdev->pm.dynpm_can_upclock = true;
472
	rdev->pm.dynpm_can_downclock = true;
473
	rdev->pm.default_sclk = rdev->clock.default_sclk;
474
	rdev->pm.default_mclk = rdev->clock.default_mclk;
475
	rdev->pm.current_sclk = rdev->clock.default_sclk;
476
	rdev->pm.current_mclk = rdev->clock.default_mclk;
477
	rdev->pm.int_thermal_type = THERMAL_TYPE_NONE;
478
 
1430 serge 479
	if (rdev->bios) {
480
		if (rdev->is_atom_bios)
481
			radeon_atombios_get_power_modes(rdev);
482
		else
483
			radeon_combios_get_power_modes(rdev);
1963 serge 484
		radeon_pm_print_states(rdev);
485
		radeon_pm_init_profile(rdev);
1430 serge 486
	}
487
 
1963 serge 488
	/* set up the internal thermal sensor if applicable */
489
	ret = radeon_hwmon_init(rdev);
490
	if (ret)
491
		return ret;
1268 serge 492
 
1963 serge 493
	if (rdev->pm.num_power_states > 1) {
1430 serge 494
 
1963 serge 495
		DRM_INFO("radeon: power management initialized\n");
1430 serge 496
	}
497
 
1268 serge 498
	return 0;
499
}
500
 
1963 serge 501
void radeon_pm_fini(struct radeon_device *rdev)
502
{
503
	if (rdev->pm.num_power_states > 1) {
504
		mutex_lock(&rdev->pm.mutex);
505
		if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
506
			rdev->pm.profile = PM_PROFILE_DEFAULT;
507
			radeon_pm_update_profile(rdev);
508
			radeon_pm_set_clocks(rdev);
509
		} else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
510
			/* reset default clocks */
511
			rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
512
			rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
513
			radeon_pm_set_clocks(rdev);
514
		}
515
		mutex_unlock(&rdev->pm.mutex);
516
 
517
//		cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
518
 
519
    }
520
 
521
	radeon_hwmon_fini(rdev);
522
}
523
 
1430 serge 524
void radeon_pm_compute_clocks(struct radeon_device *rdev)
525
{
526
	struct drm_device *ddev = rdev->ddev;
1963 serge 527
	struct drm_crtc *crtc;
1430 serge 528
	struct radeon_crtc *radeon_crtc;
529
 
1963 serge 530
	if (rdev->pm.num_power_states < 2)
1430 serge 531
		return;
532
 
533
	mutex_lock(&rdev->pm.mutex);
534
 
535
	rdev->pm.active_crtcs = 0;
1963 serge 536
	rdev->pm.active_crtc_count = 0;
537
	list_for_each_entry(crtc,
538
		&ddev->mode_config.crtc_list, head) {
539
		radeon_crtc = to_radeon_crtc(crtc);
540
		if (radeon_crtc->enabled) {
1430 serge 541
			rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
1963 serge 542
			rdev->pm.active_crtc_count++;
1430 serge 543
		}
544
	}
545
 
1963 serge 546
	if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
547
		radeon_pm_update_profile(rdev);
548
		radeon_pm_set_clocks(rdev);
549
	} else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
550
		if (rdev->pm.dynpm_state != DYNPM_STATE_DISABLED) {
551
			if (rdev->pm.active_crtc_count > 1) {
552
				if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
553
//                   cancel_delayed_work(&rdev->pm.dynpm_idle_work);
1430 serge 554
 
1963 serge 555
					rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
556
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
557
					radeon_pm_get_dynpm_state(rdev);
1430 serge 558
				radeon_pm_set_clocks(rdev);
559
 
1963 serge 560
					DRM_DEBUG_DRIVER("radeon: dynamic power management deactivated\n");
1430 serge 561
		}
1963 serge 562
			} else if (rdev->pm.active_crtc_count == 1) {
1430 serge 563
		/* TODO: Increase clocks if needed for current mode */
564
 
1963 serge 565
				if (rdev->pm.dynpm_state == DYNPM_STATE_MINIMUM) {
566
					rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
567
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_UPCLOCK;
568
					radeon_pm_get_dynpm_state(rdev);
1430 serge 569
			radeon_pm_set_clocks(rdev);
1963 serge 570
 
571
//					schedule_delayed_work(&rdev->pm.dynpm_idle_work,
572
//							   msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
573
				} else if (rdev->pm.dynpm_state == DYNPM_STATE_PAUSED) {
574
					rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
575
//					schedule_delayed_work(&rdev->pm.dynpm_idle_work,
576
//							   msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
577
					DRM_DEBUG_DRIVER("radeon: dynamic power management activated\n");
1430 serge 578
        }
1963 serge 579
			} else { /* count == 0 */
580
				if (rdev->pm.dynpm_state != DYNPM_STATE_MINIMUM) {
581
//					cancel_delayed_work(&rdev->pm.dynpm_idle_work);
582
 
583
					rdev->pm.dynpm_state = DYNPM_STATE_MINIMUM;
584
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_MINIMUM;
585
					radeon_pm_get_dynpm_state(rdev);
586
					radeon_pm_set_clocks(rdev);
1430 serge 587
		}
588
	}
589
		}
590
	}
591
 
592
	mutex_unlock(&rdev->pm.mutex);
593
}
594
 
1963 serge 595
static bool radeon_pm_in_vbl(struct radeon_device *rdev)
1430 serge 596
{
1963 serge 597
	int  crtc, vpos, hpos, vbl_status;
1430 serge 598
	bool in_vbl = true;
599
 
1963 serge 600
	/* Iterate over all active crtc's. All crtc's must be in vblank,
601
	 * otherwise return in_vbl == false.
602
	 */
603
	for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) {
604
		if (rdev->pm.active_crtcs & (1 << crtc)) {
605
			vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, &vpos, &hpos);
606
			if ((vbl_status & DRM_SCANOUTPOS_VALID) &&
607
			    !(vbl_status & DRM_SCANOUTPOS_INVBL))
1430 serge 608
				in_vbl = false;
609
		}
610
		}
1963 serge 611
 
1430 serge 612
	return in_vbl;
613
}
614
 
1963 serge 615
/* Debug helper: like radeon_pm_in_vbl(), but logs when a reclock is
 * attempted outside vblank.  @finish marks whether this is the entry
 * or exit check around the clock change. */
static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish)
{
	u32 stat_crtc = 0;
	bool in_vbl = radeon_pm_in_vbl(rdev);

	if (!in_vbl)
		DRM_DEBUG_DRIVER("not in vbl for pm change %08x at %s\n", stat_crtc,
			 finish ? "exit" : "entry");

	return in_vbl;
}
625
 
626
 
1268 serge 627
/*
628
 * Debugfs info
629
 */
630
#if defined(CONFIG_DEBUG_FS)

/* Dump the default and current engine/memory clocks, voltage and
 * PCIE lane count to the radeon_pm_info debugfs file. */
static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk);
	seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
	seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk);
	if (rdev->asic->get_memory_clock)
		seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
	if (rdev->pm.current_vddc)
		seq_printf(m, "voltage: %u mV\n", rdev->pm.current_vddc);
	if (rdev->asic->get_pcie_lanes)
		seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev));

	return 0;
}

static struct drm_info_list radeon_pm_info_list[] = {
	{"radeon_pm_info", radeon_debugfs_pm_info, 0, NULL},
};
#endif
655
 
1430 serge 656
/* Register the radeon_pm_info debugfs file; a no-op returning 0 when
 * CONFIG_DEBUG_FS is not built in. */
static int radeon_debugfs_pm_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_pm_info_list, ARRAY_SIZE(radeon_pm_info_list));
#else
	return 0;
#endif
}