Subversion Repositories Kolibri OS


Rev 1963 → Rev 1986

/*
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Rafał Miłecki
 *          Alex Deucher
 */
#include "drmP.h"
#include "radeon.h"
#include "avivod.h"
#include "atom.h"        /* added in Rev 1986 */

#define DRM_DEBUG_DRIVER(fmt, args...)

#define RADEON_IDLE_LOOP_MS 100
#define RADEON_RECLOCK_DELAY_MS 200
#define RADEON_WAIT_VBLANK_TIMEOUT 200
#define RADEON_WAIT_IDLE_TIMEOUT 200

static const char *radeon_pm_state_type_name[5] = {
    "Default",
    "Powersave",
    "Battery",
    "Balanced",
    "Performance",
};

static void radeon_dynpm_idle_work_handler(struct work_struct *work);
static int radeon_debugfs_pm_init(struct radeon_device *rdev);
static bool radeon_pm_in_vbl(struct radeon_device *rdev);
static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish);
static void radeon_pm_update_profile(struct radeon_device *rdev);
static void radeon_pm_set_clocks(struct radeon_device *rdev);

static inline int power_supply_is_system_supplied(void) { return -ENOSYS; }

#define ACPI_AC_CLASS           "ac_adapter"
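
/* ACPI notifier callback: when an ac_adapter event arrives while the driver
 * is in profile mode with the AUTO profile selected, re-evaluate the profile
 * and reprogram the clocks under the pm mutex. */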
#ifdef CONFIG_ACPI
static int radeon_acpi_event(struct notifier_block *nb,
                             unsigned long val,
                             void *data)
{
    struct radeon_device *rdev = container_of(nb, struct radeon_device, acpi_nb);
    struct acpi_bus_event *entry = (struct acpi_bus_event *)data;

    if (strcmp(entry->device_class, ACPI_AC_CLASS) == 0) {
        if (power_supply_is_system_supplied() > 0)
            DRM_DEBUG_DRIVER("pm: AC\n");
        else
            DRM_DEBUG_DRIVER("pm: DC\n");

        if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
            if (rdev->pm.profile == PM_PROFILE_AUTO) {
                mutex_lock(&rdev->pm.mutex);
                radeon_pm_update_profile(rdev);
                radeon_pm_set_clocks(rdev);
                mutex_unlock(&rdev->pm.mutex);
            }
        }
    }

    return NOTIFY_OK;
}
#endif
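
/* Pick a power profile index from the requested profile, the AC/DC supply
 * state and the number of active CRTCs, then latch the matching requested
 * power state and clock mode indices. */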
static void radeon_pm_update_profile(struct radeon_device *rdev)
{
    switch (rdev->pm.profile) {
    case PM_PROFILE_DEFAULT:
        rdev->pm.profile_index = PM_PROFILE_DEFAULT_IDX;
        break;
    case PM_PROFILE_AUTO:
        if (power_supply_is_system_supplied() > 0) {
            if (rdev->pm.active_crtc_count > 1)
                rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
            else
                rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
        } else {
            if (rdev->pm.active_crtc_count > 1)
                rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
            else
                rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
        }
        break;
    case PM_PROFILE_LOW:
        if (rdev->pm.active_crtc_count > 1)
            rdev->pm.profile_index = PM_PROFILE_LOW_MH_IDX;
        else
            rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX;
        break;
    case PM_PROFILE_MID:
        if (rdev->pm.active_crtc_count > 1)
            rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
        else
            rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
        break;
    case PM_PROFILE_HIGH:
        if (rdev->pm.active_crtc_count > 1)
            rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
        else
            rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
        break;
    }

    if (rdev->pm.active_crtc_count == 0) {
        rdev->pm.requested_power_state_index =
            rdev->pm.profiles[rdev->pm.profile_index].dpms_off_ps_idx;
        rdev->pm.requested_clock_mode_index =
            rdev->pm.profiles[rdev->pm.profile_index].dpms_off_cm_idx;
    } else {
        rdev->pm.requested_power_state_index =
            rdev->pm.profiles[rdev->pm.profile_index].dpms_on_ps_idx;
        rdev->pm.requested_clock_mode_index =
            rdev->pm.profiles[rdev->pm.profile_index].dpms_on_cm_idx;
    }
}
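
/* In this port the VRAM buffer-object unmap step is reduced to a stub: it
 * only bails out early when there are no GEM objects. */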
static void radeon_unmap_vram_bos(struct radeon_device *rdev)
{
    struct radeon_bo *bo, *n;

    if (list_empty(&rdev->gem.objects))
        return;

}
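
/* Switch to the requested power state: clamp sclk/mclk to the defaults, bail
 * out unless the GUI is idle (and, under dynpm, inside vblank), then program
 * the engine and memory clocks, applying voltage/PCIe lane changes before an
 * upclock and after a downclock. */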
static void radeon_set_power_state(struct radeon_device *rdev)
{
    u32 sclk, mclk;
    bool misc_after = false;

    if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
        (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
        return;

    if (radeon_gui_idle(rdev)) {
        sclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
            clock_info[rdev->pm.requested_clock_mode_index].sclk;
        if (sclk > rdev->pm.default_sclk)
            sclk = rdev->pm.default_sclk;

        mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
            clock_info[rdev->pm.requested_clock_mode_index].mclk;
        if (mclk > rdev->pm.default_mclk)
            mclk = rdev->pm.default_mclk;

        /* upvolt before raising clocks, downvolt after lowering clocks */
        if (sclk < rdev->pm.current_sclk)
            misc_after = true;

//      radeon_sync_with_vblank(rdev);

        if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
            if (!radeon_pm_in_vbl(rdev))
                return;
        }

        radeon_pm_prepare(rdev);

        if (!misc_after)
            /* voltage, pcie lanes, etc.*/
            radeon_pm_misc(rdev);

        /* set engine clock */
        if (sclk != rdev->pm.current_sclk) {
            radeon_pm_debug_check_in_vbl(rdev, false);
            radeon_set_engine_clock(rdev, sclk);
            radeon_pm_debug_check_in_vbl(rdev, true);
            rdev->pm.current_sclk = sclk;
            DRM_DEBUG_DRIVER("Setting: e: %d\n", sclk);
        }

        /* set memory clock */
        if (rdev->asic->set_memory_clock && (mclk != rdev->pm.current_mclk)) {
            radeon_pm_debug_check_in_vbl(rdev, false);
            radeon_set_memory_clock(rdev, mclk);
            radeon_pm_debug_check_in_vbl(rdev, true);
            rdev->pm.current_mclk = mclk;
            DRM_DEBUG_DRIVER("Setting: m: %d\n", mclk);
        }

        if (misc_after)
            /* voltage, pcie lanes, etc.*/
            radeon_pm_misc(rdev);

        radeon_pm_finish(rdev);

        rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index;
        rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index;
    } else
        DRM_DEBUG_DRIVER("pm: GUI not idle!!!\n");
}
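
/* Take the locks needed for a reclock, quiesce the GPU, apply the new power
 * state via radeon_set_power_state() and refresh the display watermarks. */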
static void radeon_pm_set_clocks(struct radeon_device *rdev)
{
    int i;

    /* no need to take locks, etc. if nothing's going to change */
    if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
        (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
        return;

    mutex_lock(&rdev->ddev->struct_mutex);
    mutex_lock(&rdev->vram_mutex);
    mutex_lock(&rdev->cp.mutex);

    /* gui idle int has issues on older chips it seems */
    if (rdev->family >= CHIP_R600) {
        if (rdev->irq.installed) {
            /* wait for GPU idle */
            rdev->pm.gui_idle = false;
            rdev->irq.gui_idle = true;
        }
    } else {
        if (rdev->cp.ready) {
//          struct radeon_fence *fence;
//          radeon_ring_alloc(rdev, 64);
//          radeon_fence_create(rdev, &fence);
//          radeon_fence_emit(rdev, fence);
//          radeon_ring_commit(rdev);
//          radeon_fence_wait(fence, false);
//          radeon_fence_unref(&fence);
        }
    }
    radeon_unmap_vram_bos(rdev);

    if (rdev->irq.installed) {
        for (i = 0; i < rdev->num_crtc; i++) {
            if (rdev->pm.active_crtcs & (1 << i)) {
                rdev->pm.req_vblank |= (1 << i);
//              drm_vblank_get(rdev->ddev, i);
            }
        }
    }

    radeon_set_power_state(rdev);

    if (rdev->irq.installed) {
        for (i = 0; i < rdev->num_crtc; i++) {
            if (rdev->pm.req_vblank & (1 << i)) {
                rdev->pm.req_vblank &= ~(1 << i);
//              drm_vblank_put(rdev->ddev, i);
            }
        }
    }

    /* update display watermarks based on new power state */
    radeon_update_bandwidth_info(rdev);
    if (rdev->pm.active_crtc_count)
        radeon_bandwidth_update(rdev);

    rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;

    mutex_unlock(&rdev->cp.mutex);
    mutex_unlock(&rdev->vram_mutex);
    mutex_unlock(&rdev->ddev->struct_mutex);
}
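
/* Dump every power state and its clock modes through DRM_DEBUG_DRIVER. */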
static void radeon_pm_print_states(struct radeon_device *rdev)
{
    int i, j;
    struct radeon_power_state *power_state;
    struct radeon_pm_clock_info *clock_info;

    DRM_DEBUG_DRIVER("%d Power State(s)\n", rdev->pm.num_power_states);
    for (i = 0; i < rdev->pm.num_power_states; i++) {
        power_state = &rdev->pm.power_state[i];
        DRM_DEBUG_DRIVER("State %d: %s\n", i,
            radeon_pm_state_type_name[power_state->type]);
        if (i == rdev->pm.default_power_state_index)
            DRM_DEBUG_DRIVER("\tDefault");
        if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP))
            DRM_DEBUG_DRIVER("\t%d PCIE Lanes\n", power_state->pcie_lanes);
        if (power_state->flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
            DRM_DEBUG_DRIVER("\tSingle display only\n");
        DRM_DEBUG_DRIVER("\t%d Clock Mode(s)\n", power_state->num_clock_modes);
        for (j = 0; j < power_state->num_clock_modes; j++) {
            clock_info = &(power_state->clock_info[j]);
            if (rdev->flags & RADEON_IS_IGP)
                DRM_DEBUG_DRIVER("\t\t%d e: %d%s\n",
                    j,
                    clock_info->sclk * 10,
                    clock_info->flags & RADEON_PM_MODE_NO_DISPLAY ? "\tNo display only" : "");
            else
                DRM_DEBUG_DRIVER("\t\t%d e: %d\tm: %d\tv: %d%s\n",
                    j,
                    clock_info->sclk * 10,
                    clock_info->mclk * 10,
                    clock_info->voltage.voltage,
                    clock_info->flags & RADEON_PM_MODE_NO_DISPLAY ? "\tNo display only" : "");
        }
    }
}
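
/* sysfs-style get/set handlers for the power_profile and power_method
 * attributes; in this port the profile store always falls back to
 * PM_PROFILE_DEFAULT. */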
static ssize_t radeon_get_pm_profile(struct device *dev,
                                     struct device_attribute *attr,
                                     char *buf)
{
    return snprintf(buf, PAGE_SIZE, "%s\n", "default");
}

static ssize_t radeon_set_pm_profile(struct device *dev,
                                     struct device_attribute *attr,
                                     const char *buf,
                                     size_t count)
{
    struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
    struct radeon_device *rdev = ddev->dev_private;

    mutex_lock(&rdev->pm.mutex);

    rdev->pm.profile = PM_PROFILE_DEFAULT;

    radeon_pm_update_profile(rdev);
    radeon_pm_set_clocks(rdev);
fail:
    mutex_unlock(&rdev->pm.mutex);

    return count;
}

static ssize_t radeon_get_pm_method(struct device *dev,
                                    struct device_attribute *attr,
                                    char *buf)
{
    struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
    struct radeon_device *rdev = ddev->dev_private;
    int pm = rdev->pm.pm_method;

    return snprintf(buf, PAGE_SIZE, "%s\n",
            (pm == PM_METHOD_DYNPM) ? "dynpm" : "profile");
}

static ssize_t radeon_set_pm_method(struct device *dev,
                                    struct device_attribute *attr,
                                    const char *buf,
                                    size_t count)
{
    struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
    struct radeon_device *rdev = ddev->dev_private;

    if (strncmp("dynpm", buf, strlen("dynpm")) == 0) {
        mutex_lock(&rdev->pm.mutex);
        rdev->pm.pm_method = PM_METHOD_DYNPM;
        rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
        rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
        mutex_unlock(&rdev->pm.mutex);
    } else if (strncmp("profile", buf, strlen("profile")) == 0) {
        mutex_lock(&rdev->pm.mutex);
        /* disable dynpm */
        rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
        rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
        rdev->pm.pm_method = PM_METHOD_PROFILE;
        mutex_unlock(&rdev->pm.mutex);
//      cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
    } else {
        DRM_ERROR("invalid power method!\n");
        goto fail;
    }
    radeon_pm_compute_clocks(rdev);
fail:
    return count;
}
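
/* hwmon helpers: report the internal thermal sensor reading for the chip
 * family; hwmon device registration itself is stubbed out in this port. */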
static ssize_t radeon_hwmon_show_temp(struct device *dev,
                                      struct device_attribute *attr,
                                      char *buf)
{
    struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
    struct radeon_device *rdev = ddev->dev_private;
    u32 temp;

    switch (rdev->pm.int_thermal_type) {
    case THERMAL_TYPE_RV6XX:
        temp = rv6xx_get_temp(rdev);
        break;
    case THERMAL_TYPE_RV770:
        temp = rv770_get_temp(rdev);
        break;
    case THERMAL_TYPE_EVERGREEN:
    case THERMAL_TYPE_NI:
        temp = evergreen_get_temp(rdev);
        break;
    default:
        temp = 0;
        break;
    }

    return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}

static ssize_t radeon_hwmon_show_name(struct device *dev,
                                      struct device_attribute *attr,
                                      char *buf)
{
    return sprintf(buf, "radeon\n");
}

static int radeon_hwmon_init(struct radeon_device *rdev)
{
    int err = 0;

    rdev->pm.int_hwmon_dev = NULL;

    return err;
}

static void radeon_hwmon_fini(struct radeon_device *rdev)
{
}
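
/* Suspend/resume hooks: pause dynpm across suspend and restore the default
 * clocks and dynpm state on resume. */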
void radeon_pm_suspend(struct radeon_device *rdev)
{
    mutex_lock(&rdev->pm.mutex);
    if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
        if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE)
            rdev->pm.dynpm_state = DYNPM_STATE_SUSPENDED;
    }
    mutex_unlock(&rdev->pm.mutex);

//  cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
}

void radeon_pm_resume(struct radeon_device *rdev)
{
    /* asic init will reset the default power state */
    mutex_lock(&rdev->pm.mutex);
    rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
    rdev->pm.current_clock_mode_index = 0;
    rdev->pm.current_sclk = rdev->pm.default_sclk;
    rdev->pm.current_mclk = rdev->pm.default_mclk;
    rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
    if (rdev->pm.pm_method == PM_METHOD_DYNPM
        && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) {
        rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
//      schedule_delayed_work(&rdev->pm.dynpm_idle_work,
//                  msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
    }
    mutex_unlock(&rdev->pm.mutex);
    radeon_pm_compute_clocks(rdev);
}
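
/* One-time PM setup: default to the profile method, read the power state
 * tables from the ATOM or COMBIOS data if a BIOS is present, and set up the
 * internal thermal sensor. */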
int radeon_pm_init(struct radeon_device *rdev)
{
    int ret;

    /* default to profile method */
    rdev->pm.pm_method = PM_METHOD_PROFILE;
    rdev->pm.profile = PM_PROFILE_DEFAULT;
    rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
    rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
    rdev->pm.dynpm_can_upclock = true;
    rdev->pm.dynpm_can_downclock = true;
    rdev->pm.default_sclk = rdev->clock.default_sclk;
    rdev->pm.default_mclk = rdev->clock.default_mclk;
    rdev->pm.current_sclk = rdev->clock.default_sclk;
    rdev->pm.current_mclk = rdev->clock.default_mclk;
    rdev->pm.int_thermal_type = THERMAL_TYPE_NONE;

    if (rdev->bios) {
        if (rdev->is_atom_bios)
            radeon_atombios_get_power_modes(rdev);
        else
            radeon_combios_get_power_modes(rdev);
        radeon_pm_print_states(rdev);
        radeon_pm_init_profile(rdev);
    }

    /* set up the internal thermal sensor if applicable */
    ret = radeon_hwmon_init(rdev);
    if (ret)
        return ret;

    if (rdev->pm.num_power_states > 1) {
        DRM_INFO("radeon: power management initialized\n");
    }

    return 0;
}

void radeon_pm_fini(struct radeon_device *rdev)
{
    if (rdev->pm.num_power_states > 1) {
        mutex_lock(&rdev->pm.mutex);
        if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
            rdev->pm.profile = PM_PROFILE_DEFAULT;
            radeon_pm_update_profile(rdev);
            radeon_pm_set_clocks(rdev);
        } else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
            /* reset default clocks */
            rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
            rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
            radeon_pm_set_clocks(rdev);
        }
        mutex_unlock(&rdev->pm.mutex);

//      cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
    }

    radeon_hwmon_fini(rdev);
}
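
/* Recount the active CRTCs and re-evaluate clocks: in profile mode update the
 * profile and reclock; in dynpm mode pause, resume or drop to minimum clocks
 * depending on how many CRTCs are active. */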
void radeon_pm_compute_clocks(struct radeon_device *rdev)
{
    struct drm_device *ddev = rdev->ddev;
    struct drm_crtc *crtc;
    struct radeon_crtc *radeon_crtc;

    if (rdev->pm.num_power_states < 2)
        return;

    mutex_lock(&rdev->pm.mutex);

    rdev->pm.active_crtcs = 0;
    rdev->pm.active_crtc_count = 0;
    list_for_each_entry(crtc,
        &ddev->mode_config.crtc_list, head) {
        radeon_crtc = to_radeon_crtc(crtc);
        if (radeon_crtc->enabled) {
            rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
            rdev->pm.active_crtc_count++;
        }
    }

    if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
        radeon_pm_update_profile(rdev);
        radeon_pm_set_clocks(rdev);
    } else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
        if (rdev->pm.dynpm_state != DYNPM_STATE_DISABLED) {
            if (rdev->pm.active_crtc_count > 1) {
                if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
//                  cancel_delayed_work(&rdev->pm.dynpm_idle_work);

                    rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
                    rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
                    radeon_pm_get_dynpm_state(rdev);
                    radeon_pm_set_clocks(rdev);

                    DRM_DEBUG_DRIVER("radeon: dynamic power management deactivated\n");
                }
            } else if (rdev->pm.active_crtc_count == 1) {
                /* TODO: Increase clocks if needed for current mode */

                if (rdev->pm.dynpm_state == DYNPM_STATE_MINIMUM) {
                    rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
                    rdev->pm.dynpm_planned_action = DYNPM_ACTION_UPCLOCK;
                    radeon_pm_get_dynpm_state(rdev);
                    radeon_pm_set_clocks(rdev);

//                  schedule_delayed_work(&rdev->pm.dynpm_idle_work,
//                             msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
                } else if (rdev->pm.dynpm_state == DYNPM_STATE_PAUSED) {
                    rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
//                  schedule_delayed_work(&rdev->pm.dynpm_idle_work,
//                             msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
                    DRM_DEBUG_DRIVER("radeon: dynamic power management activated\n");
                }
            } else { /* count == 0 */
                if (rdev->pm.dynpm_state != DYNPM_STATE_MINIMUM) {
//                  cancel_delayed_work(&rdev->pm.dynpm_idle_work);

                    rdev->pm.dynpm_state = DYNPM_STATE_MINIMUM;
                    rdev->pm.dynpm_planned_action = DYNPM_ACTION_MINIMUM;
                    radeon_pm_get_dynpm_state(rdev);
                    radeon_pm_set_clocks(rdev);
                }
            }
        }
    }

    mutex_unlock(&rdev->pm.mutex);
}
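
/* Vblank helpers used to time reclocks: a reclock is only treated as safe
 * when every active CRTC reports a scanout position inside vblank. */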
static bool radeon_pm_in_vbl(struct radeon_device *rdev)
{
    int  crtc, vpos, hpos, vbl_status;
    bool in_vbl = true;

    /* Iterate over all active crtc's. All crtc's must be in vblank,
     * otherwise return in_vbl == false.
     */
    for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) {
        if (rdev->pm.active_crtcs & (1 << crtc)) {
            vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, &vpos, &hpos);
            if ((vbl_status & DRM_SCANOUTPOS_VALID) &&
                !(vbl_status & DRM_SCANOUTPOS_INVBL))
                in_vbl = false;
        }
    }

    return in_vbl;
}

static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish)
{
    u32 stat_crtc = 0;
    bool in_vbl = radeon_pm_in_vbl(rdev);

    if (in_vbl == false)
        DRM_DEBUG_DRIVER("not in vbl for pm change %08x at %s\n", stat_crtc,
             finish ? "exit" : "entry");
    return in_vbl;
}


/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
{
    struct drm_info_node *node = (struct drm_info_node *) m->private;
    struct drm_device *dev = node->minor->dev;
    struct radeon_device *rdev = dev->dev_private;

    seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk);
    seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
    seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk);
    if (rdev->asic->get_memory_clock)
        seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
    if (rdev->pm.current_vddc)
        seq_printf(m, "voltage: %u mV\n", rdev->pm.current_vddc);
    if (rdev->asic->get_pcie_lanes)
        seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev));

    return 0;
}

static struct drm_info_list radeon_pm_info_list[] = {
    {"radeon_pm_info", radeon_debugfs_pm_info, 0, NULL},
};
#endif

static int radeon_debugfs_pm_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
    return radeon_debugfs_add_files(rdev, radeon_pm_info_list, ARRAY_SIZE(radeon_pm_info_list));
#else
    return 0;
#endif
}