Subversion Repositories Kolibri OS

Rev

Rev 6661 | Go to most recent revision | Only display areas with differences | Regard whitespace | Details | Blame | Last modification | View Log | RSS feed

Rev 6661 Rev 6938
1
/*
1
/*
2
 * Permission is hereby granted, free of charge, to any person obtaining a
2
 * Permission is hereby granted, free of charge, to any person obtaining a
3
 * copy of this software and associated documentation files (the "Software"),
3
 * copy of this software and associated documentation files (the "Software"),
4
 * to deal in the Software without restriction, including without limitation
4
 * to deal in the Software without restriction, including without limitation
5
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
5
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
6
 * and/or sell copies of the Software, and to permit persons to whom the
6
 * and/or sell copies of the Software, and to permit persons to whom the
7
 * Software is furnished to do so, subject to the following conditions:
7
 * Software is furnished to do so, subject to the following conditions:
8
 *
8
 *
9
 * The above copyright notice and this permission notice shall be included in
9
 * The above copyright notice and this permission notice shall be included in
10
 * all copies or substantial portions of the Software.
10
 * all copies or substantial portions of the Software.
11
 *
11
 *
12
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
12
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
13
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
14
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
15
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
15
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
16
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
16
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
17
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
17
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
18
 * OTHER DEALINGS IN THE SOFTWARE.
18
 * OTHER DEALINGS IN THE SOFTWARE.
19
 *
19
 *
20
 * Authors: Rafał Miłecki 
20
 * Authors: Rafał Miłecki 
21
 *          Alex Deucher 
21
 *          Alex Deucher 
22
 */
22
 */
23
#include 
23
#include 
24
#include "radeon.h"
24
#include "radeon.h"
25
#include "avivod.h"
25
#include "avivod.h"
26
#include "atom.h"
26
#include "atom.h"
27
#include "r600_dpm.h"
27
#include "r600_dpm.h"
28
 
28
 
29
#define RADEON_IDLE_LOOP_MS 100
29
#define RADEON_IDLE_LOOP_MS 100
30
#define RADEON_RECLOCK_DELAY_MS 200
30
#define RADEON_RECLOCK_DELAY_MS 200
31
#define RADEON_WAIT_VBLANK_TIMEOUT 200
31
#define RADEON_WAIT_VBLANK_TIMEOUT 200
32
 
32
 
33
/* Human-readable labels for debug output, indexed by power-state type.
 * Index 0 is intentionally blank (no/unknown type). */
static const char *radeon_pm_state_type_name[5] = {
	"",
	"Powersave",
	"Battery",
	"Balanced",
	"Performance",
};
40
 
40
 
41
static void radeon_dynpm_idle_work_handler(struct work_struct *work);
41
static void radeon_dynpm_idle_work_handler(struct work_struct *work);
42
static int radeon_debugfs_pm_init(struct radeon_device *rdev);
42
static int radeon_debugfs_pm_init(struct radeon_device *rdev);
43
static bool radeon_pm_in_vbl(struct radeon_device *rdev);
43
static bool radeon_pm_in_vbl(struct radeon_device *rdev);
44
static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish);
44
static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish);
45
static void radeon_pm_update_profile(struct radeon_device *rdev);
45
static void radeon_pm_update_profile(struct radeon_device *rdev);
46
static void radeon_pm_set_clocks(struct radeon_device *rdev);
46
static void radeon_pm_set_clocks(struct radeon_device *rdev);
47
 
47
 
48
int radeon_pm_get_type_index(struct radeon_device *rdev,
48
int radeon_pm_get_type_index(struct radeon_device *rdev,
49
			     enum radeon_pm_state_type ps_type,
49
			     enum radeon_pm_state_type ps_type,
50
			     int instance)
50
			     int instance)
51
{
51
{
52
	int i;
52
	int i;
53
	int found_instance = -1;
53
	int found_instance = -1;
54
 
54
 
55
	for (i = 0; i < rdev->pm.num_power_states; i++) {
55
	for (i = 0; i < rdev->pm.num_power_states; i++) {
56
		if (rdev->pm.power_state[i].type == ps_type) {
56
		if (rdev->pm.power_state[i].type == ps_type) {
57
			found_instance++;
57
			found_instance++;
58
			if (found_instance == instance)
58
			if (found_instance == instance)
59
				return i;
59
				return i;
60
		}
60
		}
61
	}
61
	}
62
	/* return default if no match */
62
	/* return default if no match */
63
	return rdev->pm.default_power_state_index;
63
	return rdev->pm.default_power_state_index;
64
}
64
}
65
 
65
 
66
void radeon_pm_acpi_event_handler(struct radeon_device *rdev)
66
void radeon_pm_acpi_event_handler(struct radeon_device *rdev)
67
{
67
{
68
	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
68
	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
69
		mutex_lock(&rdev->pm.mutex);
69
		mutex_lock(&rdev->pm.mutex);
70
		if (power_supply_is_system_supplied() > 0)
70
		if (power_supply_is_system_supplied() > 0)
71
			rdev->pm.dpm.ac_power = true;
71
			rdev->pm.dpm.ac_power = true;
72
		else
72
		else
73
			rdev->pm.dpm.ac_power = false;
73
			rdev->pm.dpm.ac_power = false;
74
		if (rdev->family == CHIP_ARUBA) {
74
		if (rdev->family == CHIP_ARUBA) {
75
			if (rdev->asic->dpm.enable_bapm)
75
			if (rdev->asic->dpm.enable_bapm)
76
				radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power);
76
				radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power);
77
		}
77
		}
78
		mutex_unlock(&rdev->pm.mutex);
78
		mutex_unlock(&rdev->pm.mutex);
79
        } else if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
79
        } else if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
80
		if (rdev->pm.profile == PM_PROFILE_AUTO) {
80
		if (rdev->pm.profile == PM_PROFILE_AUTO) {
81
			mutex_lock(&rdev->pm.mutex);
81
			mutex_lock(&rdev->pm.mutex);
82
			radeon_pm_update_profile(rdev);
82
			radeon_pm_update_profile(rdev);
83
			radeon_pm_set_clocks(rdev);
83
			radeon_pm_set_clocks(rdev);
84
			mutex_unlock(&rdev->pm.mutex);
84
			mutex_unlock(&rdev->pm.mutex);
85
		}
85
		}
86
	}
86
	}
87
}
87
}
88
 
88
 
89
static void radeon_pm_update_profile(struct radeon_device *rdev)
89
static void radeon_pm_update_profile(struct radeon_device *rdev)
90
{
90
{
91
	switch (rdev->pm.profile) {
91
	switch (rdev->pm.profile) {
92
	case PM_PROFILE_DEFAULT:
92
	case PM_PROFILE_DEFAULT:
93
		rdev->pm.profile_index = PM_PROFILE_DEFAULT_IDX;
93
		rdev->pm.profile_index = PM_PROFILE_DEFAULT_IDX;
94
		break;
94
		break;
95
	case PM_PROFILE_AUTO:
95
	case PM_PROFILE_AUTO:
96
		if (power_supply_is_system_supplied() > 0) {
96
		if (power_supply_is_system_supplied() > 0) {
97
			if (rdev->pm.active_crtc_count > 1)
97
			if (rdev->pm.active_crtc_count > 1)
98
				rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
98
				rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
99
			else
99
			else
100
				rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
100
				rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
101
		} else {
101
		} else {
102
			if (rdev->pm.active_crtc_count > 1)
102
			if (rdev->pm.active_crtc_count > 1)
103
				rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
103
				rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
104
			else
104
			else
105
				rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
105
				rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
106
		}
106
		}
107
		break;
107
		break;
108
	case PM_PROFILE_LOW:
108
	case PM_PROFILE_LOW:
109
		if (rdev->pm.active_crtc_count > 1)
109
		if (rdev->pm.active_crtc_count > 1)
110
			rdev->pm.profile_index = PM_PROFILE_LOW_MH_IDX;
110
			rdev->pm.profile_index = PM_PROFILE_LOW_MH_IDX;
111
		else
111
		else
112
			rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX;
112
			rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX;
113
		break;
113
		break;
114
	case PM_PROFILE_MID:
114
	case PM_PROFILE_MID:
115
		if (rdev->pm.active_crtc_count > 1)
115
		if (rdev->pm.active_crtc_count > 1)
116
			rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
116
			rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
117
		else
117
		else
118
			rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
118
			rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
119
		break;
119
		break;
120
	case PM_PROFILE_HIGH:
120
	case PM_PROFILE_HIGH:
121
		if (rdev->pm.active_crtc_count > 1)
121
		if (rdev->pm.active_crtc_count > 1)
122
			rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
122
			rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
123
		else
123
		else
124
			rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
124
			rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
125
		break;
125
		break;
126
	}
126
	}
127
 
127
 
128
	if (rdev->pm.active_crtc_count == 0) {
128
	if (rdev->pm.active_crtc_count == 0) {
129
		rdev->pm.requested_power_state_index =
129
		rdev->pm.requested_power_state_index =
130
			rdev->pm.profiles[rdev->pm.profile_index].dpms_off_ps_idx;
130
			rdev->pm.profiles[rdev->pm.profile_index].dpms_off_ps_idx;
131
		rdev->pm.requested_clock_mode_index =
131
		rdev->pm.requested_clock_mode_index =
132
			rdev->pm.profiles[rdev->pm.profile_index].dpms_off_cm_idx;
132
			rdev->pm.profiles[rdev->pm.profile_index].dpms_off_cm_idx;
133
	} else {
133
	} else {
134
		rdev->pm.requested_power_state_index =
134
		rdev->pm.requested_power_state_index =
135
			rdev->pm.profiles[rdev->pm.profile_index].dpms_on_ps_idx;
135
			rdev->pm.profiles[rdev->pm.profile_index].dpms_on_ps_idx;
136
		rdev->pm.requested_clock_mode_index =
136
		rdev->pm.requested_clock_mode_index =
137
			rdev->pm.profiles[rdev->pm.profile_index].dpms_on_cm_idx;
137
			rdev->pm.profiles[rdev->pm.profile_index].dpms_on_cm_idx;
138
	}
138
	}
139
}
139
}
140
 
140
 
141
static void radeon_unmap_vram_bos(struct radeon_device *rdev)
141
static void radeon_unmap_vram_bos(struct radeon_device *rdev)
142
{
142
{
143
	struct radeon_bo *bo, *n;
143
	struct radeon_bo *bo, *n;
144
 
144
 
145
	if (list_empty(&rdev->gem.objects))
145
	if (list_empty(&rdev->gem.objects))
146
		return;
146
		return;
147
 
147
 
148
	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
148
	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
149
		if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
149
		if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
150
			ttm_bo_unmap_virtual(&bo->tbo);
150
			ttm_bo_unmap_virtual(&bo->tbo);
151
	}
151
	}
152
}
152
}
153
 
153
 
154
static void radeon_sync_with_vblank(struct radeon_device *rdev)
154
static void radeon_sync_with_vblank(struct radeon_device *rdev)
155
{
155
{
156
	if (rdev->pm.active_crtcs) {
156
	if (rdev->pm.active_crtcs) {
157
		rdev->pm.vblank_sync = false;
157
		rdev->pm.vblank_sync = false;
158
		wait_event_timeout(
158
		wait_event_timeout(
159
			rdev->irq.vblank_queue, rdev->pm.vblank_sync,
159
			rdev->irq.vblank_queue, rdev->pm.vblank_sync,
160
			msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT));
160
			msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT));
161
	}
161
	}
162
}
162
}
163
 
163
 
164
static void radeon_set_power_state(struct radeon_device *rdev)
164
static void radeon_set_power_state(struct radeon_device *rdev)
165
{
165
{
166
	u32 sclk, mclk;
166
	u32 sclk, mclk;
167
	bool misc_after = false;
167
	bool misc_after = false;
168
 
168
 
169
	if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
169
	if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
170
	    (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
170
	    (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
171
		return;
171
		return;
172
 
172
 
173
	if (radeon_gui_idle(rdev)) {
173
	if (radeon_gui_idle(rdev)) {
174
		sclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
174
		sclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
175
			clock_info[rdev->pm.requested_clock_mode_index].sclk;
175
			clock_info[rdev->pm.requested_clock_mode_index].sclk;
176
		if (sclk > rdev->pm.default_sclk)
176
		if (sclk > rdev->pm.default_sclk)
177
			sclk = rdev->pm.default_sclk;
177
			sclk = rdev->pm.default_sclk;
178
 
178
 
179
		/* starting with BTC, there is one state that is used for both
179
		/* starting with BTC, there is one state that is used for both
180
		 * MH and SH.  Difference is that we always use the high clock index for
180
		 * MH and SH.  Difference is that we always use the high clock index for
181
		 * mclk and vddci.
181
		 * mclk and vddci.
182
		 */
182
		 */
183
		if ((rdev->pm.pm_method == PM_METHOD_PROFILE) &&
183
		if ((rdev->pm.pm_method == PM_METHOD_PROFILE) &&
184
		    (rdev->family >= CHIP_BARTS) &&
184
		    (rdev->family >= CHIP_BARTS) &&
185
		    rdev->pm.active_crtc_count &&
185
		    rdev->pm.active_crtc_count &&
186
		    ((rdev->pm.profile_index == PM_PROFILE_MID_MH_IDX) ||
186
		    ((rdev->pm.profile_index == PM_PROFILE_MID_MH_IDX) ||
187
		     (rdev->pm.profile_index == PM_PROFILE_LOW_MH_IDX)))
187
		     (rdev->pm.profile_index == PM_PROFILE_LOW_MH_IDX)))
188
			mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
188
			mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
189
				clock_info[rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx].mclk;
189
				clock_info[rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx].mclk;
190
		else
190
		else
191
			mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
191
			mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
192
				clock_info[rdev->pm.requested_clock_mode_index].mclk;
192
				clock_info[rdev->pm.requested_clock_mode_index].mclk;
193
 
193
 
194
		if (mclk > rdev->pm.default_mclk)
194
		if (mclk > rdev->pm.default_mclk)
195
			mclk = rdev->pm.default_mclk;
195
			mclk = rdev->pm.default_mclk;
196
 
196
 
197
		/* upvolt before raising clocks, downvolt after lowering clocks */
197
		/* upvolt before raising clocks, downvolt after lowering clocks */
198
		if (sclk < rdev->pm.current_sclk)
198
		if (sclk < rdev->pm.current_sclk)
199
			misc_after = true;
199
			misc_after = true;
200
 
200
 
201
		radeon_sync_with_vblank(rdev);
201
		radeon_sync_with_vblank(rdev);
202
 
202
 
203
		if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
203
		if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
204
			if (!radeon_pm_in_vbl(rdev))
204
			if (!radeon_pm_in_vbl(rdev))
205
				return;
205
				return;
206
		}
206
		}
207
 
207
 
208
		radeon_pm_prepare(rdev);
208
		radeon_pm_prepare(rdev);
209
 
209
 
210
		if (!misc_after)
210
		if (!misc_after)
211
			/* voltage, pcie lanes, etc.*/
211
			/* voltage, pcie lanes, etc.*/
212
			radeon_pm_misc(rdev);
212
			radeon_pm_misc(rdev);
213
 
213
 
214
		/* set engine clock */
214
		/* set engine clock */
215
		if (sclk != rdev->pm.current_sclk) {
215
		if (sclk != rdev->pm.current_sclk) {
216
			radeon_pm_debug_check_in_vbl(rdev, false);
216
			radeon_pm_debug_check_in_vbl(rdev, false);
217
			radeon_set_engine_clock(rdev, sclk);
217
			radeon_set_engine_clock(rdev, sclk);
218
			radeon_pm_debug_check_in_vbl(rdev, true);
218
			radeon_pm_debug_check_in_vbl(rdev, true);
219
			rdev->pm.current_sclk = sclk;
219
			rdev->pm.current_sclk = sclk;
220
			DRM_DEBUG_DRIVER("Setting: e: %d\n", sclk);
220
			DRM_DEBUG_DRIVER("Setting: e: %d\n", sclk);
221
		}
221
		}
222
 
222
 
223
		/* set memory clock */
223
		/* set memory clock */
224
		if (rdev->asic->pm.set_memory_clock && (mclk != rdev->pm.current_mclk)) {
224
		if (rdev->asic->pm.set_memory_clock && (mclk != rdev->pm.current_mclk)) {
225
			radeon_pm_debug_check_in_vbl(rdev, false);
225
			radeon_pm_debug_check_in_vbl(rdev, false);
226
			radeon_set_memory_clock(rdev, mclk);
226
			radeon_set_memory_clock(rdev, mclk);
227
			radeon_pm_debug_check_in_vbl(rdev, true);
227
			radeon_pm_debug_check_in_vbl(rdev, true);
228
			rdev->pm.current_mclk = mclk;
228
			rdev->pm.current_mclk = mclk;
229
			DRM_DEBUG_DRIVER("Setting: m: %d\n", mclk);
229
			DRM_DEBUG_DRIVER("Setting: m: %d\n", mclk);
230
		}
230
		}
231
 
231
 
232
		if (misc_after)
232
		if (misc_after)
233
			/* voltage, pcie lanes, etc.*/
233
			/* voltage, pcie lanes, etc.*/
234
			radeon_pm_misc(rdev);
234
			radeon_pm_misc(rdev);
235
 
235
 
236
		radeon_pm_finish(rdev);
236
		radeon_pm_finish(rdev);
237
 
237
 
238
		rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index;
238
		rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index;
239
		rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index;
239
		rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index;
240
	} else
240
	} else
241
		DRM_DEBUG_DRIVER("pm: GUI not idle!!!\n");
241
		DRM_DEBUG_DRIVER("pm: GUI not idle!!!\n");
242
}
242
}
243
 
243
 
244
static void radeon_pm_set_clocks(struct radeon_device *rdev)
244
static void radeon_pm_set_clocks(struct radeon_device *rdev)
245
{
245
{
246
	int i, r;
246
	int i, r;
247
 
247
 
248
	/* no need to take locks, etc. if nothing's going to change */
248
	/* no need to take locks, etc. if nothing's going to change */
249
	if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
249
	if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
250
	    (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
250
	    (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
251
		return;
251
		return;
252
 
252
 
253
	down_write(&rdev->pm.mclk_lock);
253
	down_write(&rdev->pm.mclk_lock);
254
	mutex_lock(&rdev->ring_lock);
254
	mutex_lock(&rdev->ring_lock);
255
 
255
 
256
	/* wait for the rings to drain */
256
	/* wait for the rings to drain */
257
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
257
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
258
		struct radeon_ring *ring = &rdev->ring[i];
258
		struct radeon_ring *ring = &rdev->ring[i];
259
		if (!ring->ready) {
259
		if (!ring->ready) {
260
			continue;
260
			continue;
261
		}
261
		}
262
		r = radeon_fence_wait_empty(rdev, i);
262
		r = radeon_fence_wait_empty(rdev, i);
263
		if (r) {
263
		if (r) {
264
			/* needs a GPU reset dont reset here */
264
			/* needs a GPU reset dont reset here */
265
			mutex_unlock(&rdev->ring_lock);
265
			mutex_unlock(&rdev->ring_lock);
266
			up_write(&rdev->pm.mclk_lock);
266
			up_write(&rdev->pm.mclk_lock);
267
			return;
267
			return;
268
		}
268
		}
269
	}
269
	}
270
 
270
 
271
	radeon_unmap_vram_bos(rdev);
271
	radeon_unmap_vram_bos(rdev);
272
 
272
 
273
	if (rdev->irq.installed) {
273
	if (rdev->irq.installed) {
274
		for (i = 0; i < rdev->num_crtc; i++) {
274
		for (i = 0; i < rdev->num_crtc; i++) {
275
			if (rdev->pm.active_crtcs & (1 << i)) {
275
			if (rdev->pm.active_crtcs & (1 << i)) {
-
 
276
				/* This can fail if a modeset is in progress */
-
 
277
				if (drm_vblank_get(rdev->ddev, i) == 0)
276
				rdev->pm.req_vblank |= (1 << i);
278
				rdev->pm.req_vblank |= (1 << i);
-
 
279
				else
277
				drm_vblank_get(rdev->ddev, i);
280
					DRM_DEBUG_DRIVER("crtc %d no vblank, can glitch\n",
-
 
281
							 i);
278
			}
282
			}
279
		}
283
		}
280
	}
284
	}
281
 
285
 
282
	radeon_set_power_state(rdev);
286
	radeon_set_power_state(rdev);
283
 
287
 
284
	if (rdev->irq.installed) {
288
	if (rdev->irq.installed) {
285
		for (i = 0; i < rdev->num_crtc; i++) {
289
		for (i = 0; i < rdev->num_crtc; i++) {
286
			if (rdev->pm.req_vblank & (1 << i)) {
290
			if (rdev->pm.req_vblank & (1 << i)) {
287
				rdev->pm.req_vblank &= ~(1 << i);
291
				rdev->pm.req_vblank &= ~(1 << i);
288
				drm_vblank_put(rdev->ddev, i);
292
				drm_vblank_put(rdev->ddev, i);
289
			}
293
			}
290
		}
294
		}
291
	}
295
	}
292
 
296
 
293
	/* update display watermarks based on new power state */
297
	/* update display watermarks based on new power state */
294
	radeon_update_bandwidth_info(rdev);
298
	radeon_update_bandwidth_info(rdev);
295
	if (rdev->pm.active_crtc_count)
299
	if (rdev->pm.active_crtc_count)
296
		radeon_bandwidth_update(rdev);
300
		radeon_bandwidth_update(rdev);
297
 
301
 
298
	rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
302
	rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
299
 
303
 
300
	mutex_unlock(&rdev->ring_lock);
304
	mutex_unlock(&rdev->ring_lock);
301
	up_write(&rdev->pm.mclk_lock);
305
	up_write(&rdev->pm.mclk_lock);
302
}
306
}
303
 
307
 
304
static void radeon_pm_print_states(struct radeon_device *rdev)
308
static void radeon_pm_print_states(struct radeon_device *rdev)
305
{
309
{
306
	int i, j;
310
	int i, j;
307
	struct radeon_power_state *power_state;
311
	struct radeon_power_state *power_state;
308
	struct radeon_pm_clock_info *clock_info;
312
	struct radeon_pm_clock_info *clock_info;
309
 
313
 
310
	DRM_DEBUG_DRIVER("%d Power State(s)\n", rdev->pm.num_power_states);
314
	DRM_DEBUG_DRIVER("%d Power State(s)\n", rdev->pm.num_power_states);
311
	for (i = 0; i < rdev->pm.num_power_states; i++) {
315
	for (i = 0; i < rdev->pm.num_power_states; i++) {
312
		power_state = &rdev->pm.power_state[i];
316
		power_state = &rdev->pm.power_state[i];
313
		DRM_DEBUG_DRIVER("State %d: %s\n", i,
317
		DRM_DEBUG_DRIVER("State %d: %s\n", i,
314
			radeon_pm_state_type_name[power_state->type]);
318
			radeon_pm_state_type_name[power_state->type]);
315
		if (i == rdev->pm.default_power_state_index)
319
		if (i == rdev->pm.default_power_state_index)
316
			DRM_DEBUG_DRIVER("\tDefault");
320
			DRM_DEBUG_DRIVER("\tDefault");
317
		if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP))
321
		if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP))
318
			DRM_DEBUG_DRIVER("\t%d PCIE Lanes\n", power_state->pcie_lanes);
322
			DRM_DEBUG_DRIVER("\t%d PCIE Lanes\n", power_state->pcie_lanes);
319
		if (power_state->flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
323
		if (power_state->flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
320
			DRM_DEBUG_DRIVER("\tSingle display only\n");
324
			DRM_DEBUG_DRIVER("\tSingle display only\n");
321
		DRM_DEBUG_DRIVER("\t%d Clock Mode(s)\n", power_state->num_clock_modes);
325
		DRM_DEBUG_DRIVER("\t%d Clock Mode(s)\n", power_state->num_clock_modes);
322
		for (j = 0; j < power_state->num_clock_modes; j++) {
326
		for (j = 0; j < power_state->num_clock_modes; j++) {
323
			clock_info = &(power_state->clock_info[j]);
327
			clock_info = &(power_state->clock_info[j]);
324
			if (rdev->flags & RADEON_IS_IGP)
328
			if (rdev->flags & RADEON_IS_IGP)
325
				DRM_DEBUG_DRIVER("\t\t%d e: %d\n",
329
				DRM_DEBUG_DRIVER("\t\t%d e: %d\n",
326
						 j,
330
						 j,
327
						 clock_info->sclk * 10);
331
						 clock_info->sclk * 10);
328
			else
332
			else
329
				DRM_DEBUG_DRIVER("\t\t%d e: %d\tm: %d\tv: %d\n",
333
				DRM_DEBUG_DRIVER("\t\t%d e: %d\tm: %d\tv: %d\n",
330
						 j,
334
						 j,
331
						 clock_info->sclk * 10,
335
						 clock_info->sclk * 10,
332
						 clock_info->mclk * 10,
336
						 clock_info->mclk * 10,
333
						 clock_info->voltage.voltage);
337
						 clock_info->voltage.voltage);
334
		}
338
		}
335
	}
339
	}
336
}
340
}
337
 
341
 
338
static ssize_t radeon_get_pm_profile(struct device *dev,
342
static ssize_t radeon_get_pm_profile(struct device *dev,
339
				     struct device_attribute *attr,
343
				     struct device_attribute *attr,
340
				     char *buf)
344
				     char *buf)
341
{
345
{
342
	struct drm_device *ddev = dev_get_drvdata(dev);
346
	struct drm_device *ddev = dev_get_drvdata(dev);
343
	struct radeon_device *rdev = ddev->dev_private;
347
	struct radeon_device *rdev = ddev->dev_private;
344
	int cp = rdev->pm.profile;
348
	int cp = rdev->pm.profile;
345
 
349
 
346
	return snprintf(buf, PAGE_SIZE, "%s\n",
350
	return snprintf(buf, PAGE_SIZE, "%s\n",
347
			(cp == PM_PROFILE_AUTO) ? "auto" :
351
			(cp == PM_PROFILE_AUTO) ? "auto" :
348
			(cp == PM_PROFILE_LOW) ? "low" :
352
			(cp == PM_PROFILE_LOW) ? "low" :
349
			(cp == PM_PROFILE_MID) ? "mid" :
353
			(cp == PM_PROFILE_MID) ? "mid" :
350
			(cp == PM_PROFILE_HIGH) ? "high" : "default");
354
			(cp == PM_PROFILE_HIGH) ? "high" : "default");
351
}
355
}
352
 
356
 
353
static ssize_t radeon_set_pm_profile(struct device *dev,
357
static ssize_t radeon_set_pm_profile(struct device *dev,
354
				     struct device_attribute *attr,
358
				     struct device_attribute *attr,
355
				     const char *buf,
359
				     const char *buf,
356
				     size_t count)
360
				     size_t count)
357
{
361
{
358
	struct drm_device *ddev = dev_get_drvdata(dev);
362
	struct drm_device *ddev = dev_get_drvdata(dev);
359
	struct radeon_device *rdev = ddev->dev_private;
363
	struct radeon_device *rdev = ddev->dev_private;
360
 
364
 
361
	/* Can't set profile when the card is off */
365
	/* Can't set profile when the card is off */
362
	if  ((rdev->flags & RADEON_IS_PX) &&
366
	if  ((rdev->flags & RADEON_IS_PX) &&
363
	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
367
	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
364
		return -EINVAL;
368
		return -EINVAL;
365
 
369
 
366
	mutex_lock(&rdev->pm.mutex);
370
	mutex_lock(&rdev->pm.mutex);
367
	if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
371
	if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
368
		if (strncmp("default", buf, strlen("default")) == 0)
372
		if (strncmp("default", buf, strlen("default")) == 0)
369
			rdev->pm.profile = PM_PROFILE_DEFAULT;
373
			rdev->pm.profile = PM_PROFILE_DEFAULT;
370
		else if (strncmp("auto", buf, strlen("auto")) == 0)
374
		else if (strncmp("auto", buf, strlen("auto")) == 0)
371
			rdev->pm.profile = PM_PROFILE_AUTO;
375
			rdev->pm.profile = PM_PROFILE_AUTO;
372
		else if (strncmp("low", buf, strlen("low")) == 0)
376
		else if (strncmp("low", buf, strlen("low")) == 0)
373
			rdev->pm.profile = PM_PROFILE_LOW;
377
			rdev->pm.profile = PM_PROFILE_LOW;
374
		else if (strncmp("mid", buf, strlen("mid")) == 0)
378
		else if (strncmp("mid", buf, strlen("mid")) == 0)
375
			rdev->pm.profile = PM_PROFILE_MID;
379
			rdev->pm.profile = PM_PROFILE_MID;
376
		else if (strncmp("high", buf, strlen("high")) == 0)
380
		else if (strncmp("high", buf, strlen("high")) == 0)
377
			rdev->pm.profile = PM_PROFILE_HIGH;
381
			rdev->pm.profile = PM_PROFILE_HIGH;
378
		else {
382
		else {
379
			count = -EINVAL;
383
			count = -EINVAL;
380
			goto fail;
384
			goto fail;
381
		}
385
		}
382
		radeon_pm_update_profile(rdev);
386
		radeon_pm_update_profile(rdev);
383
		radeon_pm_set_clocks(rdev);
387
		radeon_pm_set_clocks(rdev);
384
	} else
388
	} else
385
		count = -EINVAL;
389
		count = -EINVAL;
386
 
390
 
387
fail:
391
fail:
388
	mutex_unlock(&rdev->pm.mutex);
392
	mutex_unlock(&rdev->pm.mutex);
389
 
393
 
390
	return count;
394
	return count;
391
}
395
}
392
 
396
 
393
static ssize_t radeon_get_pm_method(struct device *dev,
397
static ssize_t radeon_get_pm_method(struct device *dev,
394
				    struct device_attribute *attr,
398
				    struct device_attribute *attr,
395
				    char *buf)
399
				    char *buf)
396
{
400
{
397
	struct drm_device *ddev = dev_get_drvdata(dev);
401
	struct drm_device *ddev = dev_get_drvdata(dev);
398
	struct radeon_device *rdev = ddev->dev_private;
402
	struct radeon_device *rdev = ddev->dev_private;
399
	int pm = rdev->pm.pm_method;
403
	int pm = rdev->pm.pm_method;
400
 
404
 
401
	return snprintf(buf, PAGE_SIZE, "%s\n",
405
	return snprintf(buf, PAGE_SIZE, "%s\n",
402
			(pm == PM_METHOD_DYNPM) ? "dynpm" :
406
			(pm == PM_METHOD_DYNPM) ? "dynpm" :
403
			(pm == PM_METHOD_PROFILE) ? "profile" : "dpm");
407
			(pm == PM_METHOD_PROFILE) ? "profile" : "dpm");
404
}
408
}
405
 
409
 
406
static ssize_t radeon_set_pm_method(struct device *dev,
410
static ssize_t radeon_set_pm_method(struct device *dev,
407
				    struct device_attribute *attr,
411
				    struct device_attribute *attr,
408
				    const char *buf,
412
				    const char *buf,
409
				    size_t count)
413
				    size_t count)
410
{
414
{
411
	struct drm_device *ddev = dev_get_drvdata(dev);
415
	struct drm_device *ddev = dev_get_drvdata(dev);
412
	struct radeon_device *rdev = ddev->dev_private;
416
	struct radeon_device *rdev = ddev->dev_private;
413
 
417
 
414
	/* Can't set method when the card is off */
418
	/* Can't set method when the card is off */
415
	if  ((rdev->flags & RADEON_IS_PX) &&
419
	if  ((rdev->flags & RADEON_IS_PX) &&
416
	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
420
	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
417
		count = -EINVAL;
421
		count = -EINVAL;
418
		goto fail;
422
		goto fail;
419
	}
423
	}
420
 
424
 
421
	/* we don't support the legacy modes with dpm */
425
	/* we don't support the legacy modes with dpm */
422
	if (rdev->pm.pm_method == PM_METHOD_DPM) {
426
	if (rdev->pm.pm_method == PM_METHOD_DPM) {
423
		count = -EINVAL;
427
		count = -EINVAL;
424
		goto fail;
428
		goto fail;
425
	}
429
	}
426
 
430
 
427
	if (strncmp("dynpm", buf, strlen("dynpm")) == 0) {
431
	if (strncmp("dynpm", buf, strlen("dynpm")) == 0) {
428
		mutex_lock(&rdev->pm.mutex);
432
		mutex_lock(&rdev->pm.mutex);
429
		rdev->pm.pm_method = PM_METHOD_DYNPM;
433
		rdev->pm.pm_method = PM_METHOD_DYNPM;
430
		rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
434
		rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
431
		rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
435
		rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
432
		mutex_unlock(&rdev->pm.mutex);
436
		mutex_unlock(&rdev->pm.mutex);
433
	} else if (strncmp("profile", buf, strlen("profile")) == 0) {
437
	} else if (strncmp("profile", buf, strlen("profile")) == 0) {
434
		mutex_lock(&rdev->pm.mutex);
438
		mutex_lock(&rdev->pm.mutex);
435
		/* disable dynpm */
439
		/* disable dynpm */
436
		rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
440
		rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
437
		rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
441
		rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
438
		rdev->pm.pm_method = PM_METHOD_PROFILE;
442
		rdev->pm.pm_method = PM_METHOD_PROFILE;
439
		mutex_unlock(&rdev->pm.mutex);
443
		mutex_unlock(&rdev->pm.mutex);
440
//		cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
444
//		cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
441
	} else {
445
	} else {
442
		count = -EINVAL;
446
		count = -EINVAL;
443
		goto fail;
447
		goto fail;
444
	}
448
	}
445
	radeon_pm_compute_clocks(rdev);
449
	radeon_pm_compute_clocks(rdev);
446
fail:
450
fail:
447
	return count;
451
	return count;
448
}
452
}
449
 
453
 
450
static ssize_t radeon_get_dpm_state(struct device *dev,
454
static ssize_t radeon_get_dpm_state(struct device *dev,
451
				    struct device_attribute *attr,
455
				    struct device_attribute *attr,
452
				    char *buf)
456
				    char *buf)
453
{
457
{
454
	struct drm_device *ddev = dev_get_drvdata(dev);
458
	struct drm_device *ddev = dev_get_drvdata(dev);
455
	struct radeon_device *rdev = ddev->dev_private;
459
	struct radeon_device *rdev = ddev->dev_private;
456
	enum radeon_pm_state_type pm = rdev->pm.dpm.user_state;
460
	enum radeon_pm_state_type pm = rdev->pm.dpm.user_state;
457
 
461
 
458
	return snprintf(buf, PAGE_SIZE, "%s\n",
462
	return snprintf(buf, PAGE_SIZE, "%s\n",
459
			(pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
463
			(pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
460
			(pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
464
			(pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
461
}
465
}
462
 
466
 
463
/* sysfs store: parse the requested dpm power state ("battery",
 * "balanced" or "performance") and record it as the user state.
 * Returns the consumed byte count on success, -EINVAL on bad input.
 */
static ssize_t radeon_set_dpm_state(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct radeon_device *rdev = ddev->dev_private;

	/* user_state is only written while holding pm.mutex */
	mutex_lock(&rdev->pm.mutex);
	/* strncmp() only matches the prefix, so trailing characters
	 * (e.g. the newline appended by echo) are tolerated. */
	if (strncmp("battery", buf, strlen("battery")) == 0)
		rdev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY;
	else if (strncmp("balanced", buf, strlen("balanced")) == 0)
		rdev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
	else if (strncmp("performance", buf, strlen("performance")) == 0)
		rdev->pm.dpm.user_state = POWER_STATE_TYPE_PERFORMANCE;
	else {
		/* unlock before bailing out — the fail path does not unlock */
		mutex_unlock(&rdev->pm.mutex);
		count = -EINVAL;
		goto fail;
	}
	mutex_unlock(&rdev->pm.mutex);

	/* Can't set dpm state when the card is off */
	if (!(rdev->flags & RADEON_IS_PX) ||
	    (ddev->switch_power_state == DRM_SWITCH_POWER_ON))
		radeon_pm_compute_clocks(rdev);

fail:
	return count;
}
493
 
497
 
494
static ssize_t radeon_get_dpm_forced_performance_level(struct device *dev,
498
static ssize_t radeon_get_dpm_forced_performance_level(struct device *dev,
495
						       struct device_attribute *attr,
499
						       struct device_attribute *attr,
496
						       char *buf)
500
						       char *buf)
497
{
501
{
498
	struct drm_device *ddev = dev_get_drvdata(dev);
502
	struct drm_device *ddev = dev_get_drvdata(dev);
499
	struct radeon_device *rdev = ddev->dev_private;
503
	struct radeon_device *rdev = ddev->dev_private;
500
	enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
504
	enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
501
 
505
 
502
	if  ((rdev->flags & RADEON_IS_PX) &&
506
	if  ((rdev->flags & RADEON_IS_PX) &&
503
	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
507
	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
504
		return snprintf(buf, PAGE_SIZE, "off\n");
508
		return snprintf(buf, PAGE_SIZE, "off\n");
505
 
509
 
506
	return snprintf(buf, PAGE_SIZE, "%s\n",
510
	return snprintf(buf, PAGE_SIZE, "%s\n",
507
			(level == RADEON_DPM_FORCED_LEVEL_AUTO) ? "auto" :
511
			(level == RADEON_DPM_FORCED_LEVEL_AUTO) ? "auto" :
508
			(level == RADEON_DPM_FORCED_LEVEL_LOW) ? "low" : "high");
512
			(level == RADEON_DPM_FORCED_LEVEL_LOW) ? "low" : "high");
509
}
513
}
510
 
514
 
511
/* sysfs store: force the dpm performance level to "low", "high" or
 * "auto".  Rejected with -EINVAL while a thermal override is active or
 * when the (PX) card is powered down.  Returns the consumed byte count
 * on success, -EINVAL otherwise.
 */
static ssize_t radeon_set_dpm_forced_performance_level(struct device *dev,
						       struct device_attribute *attr,
						       const char *buf,
						       size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct radeon_device *rdev = ddev->dev_private;
	enum radeon_dpm_forced_level level;
	int ret = 0;

	/* Can't force performance level when the card is off */
	if  ((rdev->flags & RADEON_IS_PX) &&
	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return -EINVAL;

	/* pm.mutex is held across parsing AND the level change; the fail
	 * label drops it, so every goto below stays balanced. */
	mutex_lock(&rdev->pm.mutex);
	if (strncmp("low", buf, strlen("low")) == 0) {
		level = RADEON_DPM_FORCED_LEVEL_LOW;
	} else if (strncmp("high", buf, strlen("high")) == 0) {
		level = RADEON_DPM_FORCED_LEVEL_HIGH;
	} else if (strncmp("auto", buf, strlen("auto")) == 0) {
		level = RADEON_DPM_FORCED_LEVEL_AUTO;
	} else {
		count = -EINVAL;
		goto fail;
	}
	if (rdev->asic->dpm.force_performance_level) {
		/* the thermal code owns the level while thermal_active */
		if (rdev->pm.dpm.thermal_active) {
			count = -EINVAL;
			goto fail;
		}
		ret = radeon_dpm_force_performance_level(rdev, level);
		if (ret)
			count = -EINVAL;
	}
fail:
	mutex_unlock(&rdev->pm.mutex);

	return count;
}
551
 
555
 
552
 
556
 
553
static ssize_t radeon_hwmon_show_temp(struct device *dev,
557
static ssize_t radeon_hwmon_show_temp(struct device *dev,
554
				      struct device_attribute *attr,
558
				      struct device_attribute *attr,
555
				      char *buf)
559
				      char *buf)
556
{
560
{
557
	struct radeon_device *rdev = dev_get_drvdata(dev);
561
	struct radeon_device *rdev = dev_get_drvdata(dev);
558
	struct drm_device *ddev = rdev->ddev;
562
	struct drm_device *ddev = rdev->ddev;
559
	int temp;
563
	int temp;
560
 
564
 
561
	/* Can't get temperature when the card is off */
565
	/* Can't get temperature when the card is off */
562
	if  ((rdev->flags & RADEON_IS_PX) &&
566
	if  ((rdev->flags & RADEON_IS_PX) &&
563
	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
567
	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
564
		return -EINVAL;
568
		return -EINVAL;
565
 
569
 
566
	if (rdev->asic->pm.get_temperature)
570
	if (rdev->asic->pm.get_temperature)
567
		temp = radeon_get_temperature(rdev);
571
		temp = radeon_get_temperature(rdev);
568
	else
572
	else
569
		temp = 0;
573
		temp = 0;
570
 
574
 
571
	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
575
	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
572
}
576
}
573
 
577
 
574
/* hwmon show: report the dpm thermal trip point.
 * NOTE(review): the hysteresis path (min_temp, selected via the sensor
 * attribute index) is disabled in this port, so this always reports
 * max_temp — confirm that is intended before re-enabling hwmon attrs.
 */
static ssize_t radeon_hwmon_show_temp_thresh(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	struct radeon_device *rdev = dev_get_drvdata(dev);
//	int hyst = to_sensor_dev_attr(attr)->index;
	int temp;

//	if (hyst)
//		temp = rdev->pm.dpm.thermal.min_temp;
//	else
		temp = rdev->pm.dpm.thermal.max_temp;

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}
589
 
593
 
590
 
594
 
591
/* hwmon sysfs attribute table.  The sensor entries are disabled in this
 * port, leaving only the NULL terminator, i.e. no attributes are
 * actually exported. */
static struct attribute *hwmon_attributes[] = {
//	&sensor_dev_attr_temp1_input.dev_attr.attr,
//	&sensor_dev_attr_temp1_crit.dev_attr.attr,
//	&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
	NULL
};
597
 
601
 
598
 
602
 
599
 
603
 
600
/* Set up hwmon support for the internal thermal sensor.
 * In this port the actual hwmon device registration is stubbed out; the
 * switch only bails out early for sensor-equipped asics that lack a
 * get_temperature callback.  Always returns 0.
 */
static int radeon_hwmon_init(struct radeon_device *rdev)
{
	int err = 0;

	switch (rdev->pm.int_thermal_type) {
	case THERMAL_TYPE_RV6XX:
	case THERMAL_TYPE_RV770:
	case THERMAL_TYPE_EVERGREEN:
	case THERMAL_TYPE_NI:
	case THERMAL_TYPE_SUMO:
	case THERMAL_TYPE_SI:
	case THERMAL_TYPE_CI:
	case THERMAL_TYPE_KV:
		/* nothing to do without a temperature hook */
		if (rdev->asic->pm.get_temperature == NULL)
			return err;

		break;
	default:
		break;
	}

	return err;
}
623
 
627
 
624
/* Tear down hwmon support.  Unregistration is stubbed out in this port
 * (radeon_hwmon_init never registers a device), so this is a no-op. */
static void radeon_hwmon_fini(struct radeon_device *rdev)
{
//   if (rdev->pm.int_hwmon_dev)
//       hwmon_device_unregister(rdev->pm.int_hwmon_dev);
}
629
 
633
 
630
/* Worker: react to a thermal event by switching the dpm state.
 * Enters the internal thermal state while the temperature reads at or
 * above min_temp (or, without a sensor, while high_to_low is clear),
 * and restores the user's chosen state otherwise.
 */
static void radeon_dpm_thermal_work_handler(struct work_struct *work)
{
	struct radeon_device *rdev =
		container_of(work, struct radeon_device,
			     pm.dpm.thermal.work);
	/* switch to the thermal state */
	enum radeon_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;

	/* bail if dpm init failed or dpm is disabled */
	if (!rdev->pm.dpm_enabled)
		return;

	if (rdev->asic->pm.get_temperature) {
		int temp = radeon_get_temperature(rdev);

		if (temp < rdev->pm.dpm.thermal.min_temp)
			/* switch back the user state */
			dpm_state = rdev->pm.dpm.user_state;
	} else {
		if (rdev->pm.dpm.thermal.high_to_low)
			/* switch back the user state */
			dpm_state = rdev->pm.dpm.user_state;
	}
	/* thermal_active and state are updated together under pm.mutex */
	mutex_lock(&rdev->pm.mutex);
	if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
		rdev->pm.dpm.thermal_active = true;
	else
		rdev->pm.dpm.thermal_active = false;
	rdev->pm.dpm.state = dpm_state;
	mutex_unlock(&rdev->pm.mutex);

	radeon_pm_compute_clocks(rdev);
}
662
 
666
 
663
static bool radeon_dpm_single_display(struct radeon_device *rdev)
667
static bool radeon_dpm_single_display(struct radeon_device *rdev)
664
{
668
{
665
	bool single_display = (rdev->pm.dpm.new_active_crtc_count < 2) ?
669
	bool single_display = (rdev->pm.dpm.new_active_crtc_count < 2) ?
666
		true : false;
670
		true : false;
667
 
671
 
668
	/* check if the vblank period is too short to adjust the mclk */
672
	/* check if the vblank period is too short to adjust the mclk */
669
	if (single_display && rdev->asic->dpm.vblank_too_short) {
673
	if (single_display && rdev->asic->dpm.vblank_too_short) {
670
		if (radeon_dpm_vblank_too_short(rdev))
674
		if (radeon_dpm_vblank_too_short(rdev))
671
			single_display = false;
675
			single_display = false;
672
	}
676
	}
673
 
677
 
674
	/* 120hz tends to be problematic even if they are under the
678
	/* 120hz tends to be problematic even if they are under the
675
	 * vblank limit.
679
	 * vblank limit.
676
	 */
680
	 */
677
	if (single_display && (r600_dpm_get_vrefresh(rdev) >= 120))
681
	if (single_display && (r600_dpm_get_vrefresh(rdev) >= 120))
678
		single_display = false;
682
		single_display = false;
679
 
683
 
680
	return single_display;
684
	return single_display;
681
}
685
}
682
 
686
 
683
/* Select the best matching power state for @dpm_state from the parsed
 * power-state table.  Scans every state once; if nothing matches, the
 * fallback switch remaps @dpm_state to a progressively more generic
 * state and restarts the scan.  Returns NULL only when even the
 * fallback chain finds nothing.
 */
static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
						     enum radeon_pm_state_type dpm_state)
{
	int i;
	struct radeon_ps *ps;
	u32 ui_class;
	bool single_display = radeon_dpm_single_display(rdev);

	/* certain older asics have a separare 3D performance state,
	 * so try that first if the user selected performance
	 */
	if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
		dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
	/* balanced states don't exist at the moment */
	if (dpm_state == POWER_STATE_TYPE_BALANCED)
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;

restart_search:
	/* Pick the best power state based on current conditions */
	for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
		ps = &rdev->pm.dpm.ps[i];
		ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
		switch (dpm_state) {
		/* user states */
		case POWER_STATE_TYPE_BATTERY:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
				/* single-display-only states are usable only
				 * when at most one display is active */
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_BALANCED:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_PERFORMANCE:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		/* internal states */
		case POWER_STATE_TYPE_INTERNAL_UVD:
			/* generic UVD: use the cached uvd state if there is one */
			if (rdev->pm.dpm.uvd_ps)
				return rdev->pm.dpm.uvd_ps;
			else
				break;
		case POWER_STATE_TYPE_INTERNAL_UVD_SD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_BOOT:
			return rdev->pm.dpm.boot_ps;
		case POWER_STATE_TYPE_INTERNAL_THERMAL:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ACPI:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ULV:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_3DPERF:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
				return ps;
			break;
		default:
			break;
		}
	}
	/* use a fallback state if we didn't match */
	switch (dpm_state) {
	case POWER_STATE_TYPE_INTERNAL_UVD_SD:
		dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_UVD_HD:
	case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
	case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
		if (rdev->pm.dpm.uvd_ps) {
			return rdev->pm.dpm.uvd_ps;
		} else {
			dpm_state = POWER_STATE_TYPE_PERFORMANCE;
			goto restart_search;
		}
	case POWER_STATE_TYPE_INTERNAL_THERMAL:
		dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_ACPI:
		dpm_state = POWER_STATE_TYPE_BATTERY;
		goto restart_search;
	case POWER_STATE_TYPE_BATTERY:
	case POWER_STATE_TYPE_BALANCED:
	case POWER_STATE_TYPE_INTERNAL_3DPERF:
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;
		goto restart_search;
	default:
		break;
	}

	return NULL;
}
809
 
813
 
810
/* Reprogram the hardware to the power state matching the current dpm
 * state.  Caller holds pm.mutex (hence "_locked").  Skips the full
 * reprogram when the picked state equals the current one and only the
 * display configuration needs refreshing; otherwise drains all rings
 * and performs the pre/set/post power-state sequence under mclk_lock
 * and ring_lock.
 */
static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
{
	int i;
	struct radeon_ps *ps;
	enum radeon_pm_state_type dpm_state;
	int ret;
	bool single_display = radeon_dpm_single_display(rdev);

	/* if dpm init failed */
	if (!rdev->pm.dpm_enabled)
		return;

	if (rdev->pm.dpm.user_state != rdev->pm.dpm.state) {
		/* add other state override checks here */
		if ((!rdev->pm.dpm.thermal_active) &&
		    (!rdev->pm.dpm.uvd_active))
			rdev->pm.dpm.state = rdev->pm.dpm.user_state;
	}
	dpm_state = rdev->pm.dpm.state;

	ps = radeon_dpm_pick_power_state(rdev, dpm_state);
	if (ps)
		rdev->pm.dpm.requested_ps = ps;
	else
		return;

	/* no need to reprogram if nothing changed unless we are on BTC+ */
	if (rdev->pm.dpm.current_ps == rdev->pm.dpm.requested_ps) {
		/* vce just modifies an existing state so force a change */
		if (ps->vce_active != rdev->pm.dpm.vce_active)
			goto force;
		/* user has made a display change (such as timing) */
		if (rdev->pm.dpm.single_display != single_display)
			goto force;
		if ((rdev->family < CHIP_BARTS) || (rdev->flags & RADEON_IS_IGP)) {
			/* for pre-BTC and APUs if the num crtcs changed but state is the same,
			 * all we need to do is update the display configuration.
			 */
			if (rdev->pm.dpm.new_active_crtcs != rdev->pm.dpm.current_active_crtcs) {
				/* update display watermarks based on new power state */
				radeon_bandwidth_update(rdev);
				/* update displays */
				radeon_dpm_display_configuration_changed(rdev);
				rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
				rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
			}
			return;
		} else {
			/* for BTC+ if the num crtcs hasn't changed and state is the same,
			 * nothing to do, if the num crtcs is > 1 and state is the same,
			 * update display configuration.
			 */
			if (rdev->pm.dpm.new_active_crtcs ==
			    rdev->pm.dpm.current_active_crtcs) {
				return;
			} else {
				if ((rdev->pm.dpm.current_active_crtc_count > 1) &&
				    (rdev->pm.dpm.new_active_crtc_count > 1)) {
					/* update display watermarks based on new power state */
					radeon_bandwidth_update(rdev);
					/* update displays */
					radeon_dpm_display_configuration_changed(rdev);
					rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
					rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
					return;
				}
			}
		}
	}

force:
	/* radeon_dpm == 1: verbose module-parameter debugging */
	if (radeon_dpm == 1) {
		printk("switching from power state:\n");
		radeon_dpm_print_power_state(rdev, rdev->pm.dpm.current_ps);
		printk("switching to power state:\n");
		radeon_dpm_print_power_state(rdev, rdev->pm.dpm.requested_ps);
	}

	/* lock order: mclk_lock (write) then ring_lock */
	down_write(&rdev->pm.mclk_lock);
	mutex_lock(&rdev->ring_lock);

	/* update whether vce is active */
	ps->vce_active = rdev->pm.dpm.vce_active;

	ret = radeon_dpm_pre_set_power_state(rdev);
	if (ret)
		goto done;

	/* update display watermarks based on new power state */
	radeon_bandwidth_update(rdev);
	/* update displays */
	radeon_dpm_display_configuration_changed(rdev);

	/* wait for the rings to drain */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		struct radeon_ring *ring = &rdev->ring[i];
		if (ring->ready)
			radeon_fence_wait_empty(rdev, i);
	}

	/* program the new power state */
	radeon_dpm_set_power_state(rdev);

	/* update current power state */
	rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps;

	radeon_dpm_post_set_power_state(rdev);

	rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
	rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
	rdev->pm.dpm.single_display = single_display;

	if (rdev->asic->dpm.force_performance_level) {
		if (rdev->pm.dpm.thermal_active) {
			enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
			/* force low perf level for thermal */
			radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_LOW);
			/* save the user's level */
			rdev->pm.dpm.forced_level = level;
		} else {
			/* otherwise, user selected level */
			radeon_dpm_force_performance_level(rdev, rdev->pm.dpm.forced_level);
		}
	}

done:
	mutex_unlock(&rdev->ring_lock);
	up_write(&rdev->pm.mclk_lock);
}
939
 
943
 
940
/* Enable or disable UVD power management.
 * On asics with UVD powergating this toggles the power island directly
 * (keeping it on while any sd/hd stream counts are non-zero); on other
 * asics it switches the dpm state to/from the internal UVD state and
 * recomputes clocks.
 */
void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable)
{
	enum radeon_pm_state_type dpm_state;

	if (rdev->asic->dpm.powergate_uvd) {
		mutex_lock(&rdev->pm.mutex);
		/* don't powergate anything if we
		   have active but pause streams */
		enable |= rdev->pm.dpm.sd > 0;
		enable |= rdev->pm.dpm.hd > 0;
		/* enable/disable UVD */
		radeon_dpm_powergate_uvd(rdev, !enable);
		mutex_unlock(&rdev->pm.mutex);
	} else {
		if (enable) {
			mutex_lock(&rdev->pm.mutex);
			rdev->pm.dpm.uvd_active = true;
			/* disable this for now */
#if 0
			/* stream-count-specific UVD states, currently unused */
			if ((rdev->pm.dpm.sd == 1) && (rdev->pm.dpm.hd == 0))
				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_SD;
			else if ((rdev->pm.dpm.sd == 2) && (rdev->pm.dpm.hd == 0))
				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
			else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 1))
				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
			else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 2))
				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD2;
			else
#endif
				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD;
			rdev->pm.dpm.state = dpm_state;
			mutex_unlock(&rdev->pm.mutex);
		} else {
			mutex_lock(&rdev->pm.mutex);
			rdev->pm.dpm.uvd_active = false;
			mutex_unlock(&rdev->pm.mutex);
		}

		radeon_pm_compute_clocks(rdev);
	}
}
981
 
985
 
982
void radeon_dpm_enable_vce(struct radeon_device *rdev, bool enable)
986
void radeon_dpm_enable_vce(struct radeon_device *rdev, bool enable)
983
{
987
{
984
	if (enable) {
988
	if (enable) {
985
		mutex_lock(&rdev->pm.mutex);
989
		mutex_lock(&rdev->pm.mutex);
986
		rdev->pm.dpm.vce_active = true;
990
		rdev->pm.dpm.vce_active = true;
987
		/* XXX select vce level based on ring/task */
991
		/* XXX select vce level based on ring/task */
988
		rdev->pm.dpm.vce_level = RADEON_VCE_LEVEL_AC_ALL;
992
		rdev->pm.dpm.vce_level = RADEON_VCE_LEVEL_AC_ALL;
989
		mutex_unlock(&rdev->pm.mutex);
993
		mutex_unlock(&rdev->pm.mutex);
990
	} else {
994
	} else {
991
		mutex_lock(&rdev->pm.mutex);
995
		mutex_lock(&rdev->pm.mutex);
992
		rdev->pm.dpm.vce_active = false;
996
		rdev->pm.dpm.vce_active = false;
993
		mutex_unlock(&rdev->pm.mutex);
997
		mutex_unlock(&rdev->pm.mutex);
994
	}
998
	}
995
 
999
 
996
	radeon_pm_compute_clocks(rdev);
1000
	radeon_pm_compute_clocks(rdev);
997
}
1001
}
998
 
1002
 
999
static void radeon_pm_suspend_old(struct radeon_device *rdev)
1003
static void radeon_pm_suspend_old(struct radeon_device *rdev)
1000
{
1004
{
1001
	mutex_lock(&rdev->pm.mutex);
1005
	mutex_lock(&rdev->pm.mutex);
1002
	if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
1006
	if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
1003
		if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE)
1007
		if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE)
1004
			rdev->pm.dynpm_state = DYNPM_STATE_SUSPENDED;
1008
			rdev->pm.dynpm_state = DYNPM_STATE_SUSPENDED;
1005
	}
1009
	}
1006
	mutex_unlock(&rdev->pm.mutex);
1010
	mutex_unlock(&rdev->pm.mutex);
1007
 
1011
 
1008
}
1012
}
1009
 
1013
 
1010
static void radeon_pm_suspend_dpm(struct radeon_device *rdev)
1014
static void radeon_pm_suspend_dpm(struct radeon_device *rdev)
1011
{
1015
{
1012
	mutex_lock(&rdev->pm.mutex);
1016
	mutex_lock(&rdev->pm.mutex);
1013
	/* disable dpm */
1017
	/* disable dpm */
1014
	radeon_dpm_disable(rdev);
1018
	radeon_dpm_disable(rdev);
1015
	/* reset the power state */
1019
	/* reset the power state */
1016
	rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
1020
	rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
1017
	rdev->pm.dpm_enabled = false;
1021
	rdev->pm.dpm_enabled = false;
1018
	mutex_unlock(&rdev->pm.mutex);
1022
	mutex_unlock(&rdev->pm.mutex);
1019
}
1023
}
1020
 
1024
 
1021
void radeon_pm_suspend(struct radeon_device *rdev)
1025
void radeon_pm_suspend(struct radeon_device *rdev)
1022
{
1026
{
1023
	if (rdev->pm.pm_method == PM_METHOD_DPM)
1027
	if (rdev->pm.pm_method == PM_METHOD_DPM)
1024
		radeon_pm_suspend_dpm(rdev);
1028
		radeon_pm_suspend_dpm(rdev);
1025
	else
1029
	else
1026
		radeon_pm_suspend_old(rdev);
1030
		radeon_pm_suspend_old(rdev);
1027
}
1031
}
1028
 
1032
 
1029
static void radeon_pm_resume_old(struct radeon_device *rdev)
1033
static void radeon_pm_resume_old(struct radeon_device *rdev)
1030
{
1034
{
1031
	/* set up the default clocks if the MC ucode is loaded */
1035
	/* set up the default clocks if the MC ucode is loaded */
1032
	if ((rdev->family >= CHIP_BARTS) &&
1036
	if ((rdev->family >= CHIP_BARTS) &&
1033
	    (rdev->family <= CHIP_CAYMAN) &&
1037
	    (rdev->family <= CHIP_CAYMAN) &&
1034
	    rdev->mc_fw) {
1038
	    rdev->mc_fw) {
1035
		if (rdev->pm.default_vddc)
1039
		if (rdev->pm.default_vddc)
1036
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
1040
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
1037
						SET_VOLTAGE_TYPE_ASIC_VDDC);
1041
						SET_VOLTAGE_TYPE_ASIC_VDDC);
1038
		if (rdev->pm.default_vddci)
1042
		if (rdev->pm.default_vddci)
1039
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
1043
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
1040
						SET_VOLTAGE_TYPE_ASIC_VDDCI);
1044
						SET_VOLTAGE_TYPE_ASIC_VDDCI);
1041
		if (rdev->pm.default_sclk)
1045
		if (rdev->pm.default_sclk)
1042
			radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
1046
			radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
1043
		if (rdev->pm.default_mclk)
1047
		if (rdev->pm.default_mclk)
1044
			radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
1048
			radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
1045
	}
1049
	}
1046
	/* asic init will reset the default power state */
1050
	/* asic init will reset the default power state */
1047
	mutex_lock(&rdev->pm.mutex);
1051
	mutex_lock(&rdev->pm.mutex);
1048
	rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
1052
	rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
1049
	rdev->pm.current_clock_mode_index = 0;
1053
	rdev->pm.current_clock_mode_index = 0;
1050
	rdev->pm.current_sclk = rdev->pm.default_sclk;
1054
	rdev->pm.current_sclk = rdev->pm.default_sclk;
1051
	rdev->pm.current_mclk = rdev->pm.default_mclk;
1055
	rdev->pm.current_mclk = rdev->pm.default_mclk;
1052
	if (rdev->pm.power_state) {
1056
	if (rdev->pm.power_state) {
1053
		rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
1057
		rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
1054
		rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci;
1058
		rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci;
1055
	}
1059
	}
1056
	if (rdev->pm.pm_method == PM_METHOD_DYNPM
1060
	if (rdev->pm.pm_method == PM_METHOD_DYNPM
1057
	    && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) {
1061
	    && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) {
1058
		rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
1062
		rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
1059
//		schedule_delayed_work(&rdev->pm.dynpm_idle_work,
1063
//		schedule_delayed_work(&rdev->pm.dynpm_idle_work,
1060
//					msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
1064
//					msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
1061
	}
1065
	}
1062
	mutex_unlock(&rdev->pm.mutex);
1066
	mutex_unlock(&rdev->pm.mutex);
1063
	radeon_pm_compute_clocks(rdev);
1067
	radeon_pm_compute_clocks(rdev);
1064
}
1068
}
1065
 
1069
 
1066
static void radeon_pm_resume_dpm(struct radeon_device *rdev)
1070
static void radeon_pm_resume_dpm(struct radeon_device *rdev)
1067
{
1071
{
1068
	int ret;
1072
	int ret;
1069
 
1073
 
1070
	/* asic init will reset to the boot state */
1074
	/* asic init will reset to the boot state */
1071
	mutex_lock(&rdev->pm.mutex);
1075
	mutex_lock(&rdev->pm.mutex);
1072
	rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
1076
	rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
1073
	radeon_dpm_setup_asic(rdev);
1077
	radeon_dpm_setup_asic(rdev);
1074
	ret = radeon_dpm_enable(rdev);
1078
	ret = radeon_dpm_enable(rdev);
1075
	mutex_unlock(&rdev->pm.mutex);
1079
	mutex_unlock(&rdev->pm.mutex);
1076
	if (ret)
1080
	if (ret)
1077
		goto dpm_resume_fail;
1081
		goto dpm_resume_fail;
1078
	rdev->pm.dpm_enabled = true;
1082
	rdev->pm.dpm_enabled = true;
1079
	return;
1083
	return;
1080
 
1084
 
1081
dpm_resume_fail:
1085
dpm_resume_fail:
1082
	DRM_ERROR("radeon: dpm resume failed\n");
1086
	DRM_ERROR("radeon: dpm resume failed\n");
1083
	if ((rdev->family >= CHIP_BARTS) &&
1087
	if ((rdev->family >= CHIP_BARTS) &&
1084
	    (rdev->family <= CHIP_CAYMAN) &&
1088
	    (rdev->family <= CHIP_CAYMAN) &&
1085
	    rdev->mc_fw) {
1089
	    rdev->mc_fw) {
1086
		if (rdev->pm.default_vddc)
1090
		if (rdev->pm.default_vddc)
1087
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
1091
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
1088
						SET_VOLTAGE_TYPE_ASIC_VDDC);
1092
						SET_VOLTAGE_TYPE_ASIC_VDDC);
1089
		if (rdev->pm.default_vddci)
1093
		if (rdev->pm.default_vddci)
1090
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
1094
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
1091
						SET_VOLTAGE_TYPE_ASIC_VDDCI);
1095
						SET_VOLTAGE_TYPE_ASIC_VDDCI);
1092
		if (rdev->pm.default_sclk)
1096
		if (rdev->pm.default_sclk)
1093
			radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
1097
			radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
1094
		if (rdev->pm.default_mclk)
1098
		if (rdev->pm.default_mclk)
1095
			radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
1099
			radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
1096
	}
1100
	}
1097
}
1101
}
1098
 
1102
 
1099
void radeon_pm_resume(struct radeon_device *rdev)
1103
void radeon_pm_resume(struct radeon_device *rdev)
1100
{
1104
{
1101
	if (rdev->pm.pm_method == PM_METHOD_DPM)
1105
	if (rdev->pm.pm_method == PM_METHOD_DPM)
1102
		radeon_pm_resume_dpm(rdev);
1106
		radeon_pm_resume_dpm(rdev);
1103
	else
1107
	else
1104
		radeon_pm_resume_old(rdev);
1108
		radeon_pm_resume_old(rdev);
1105
}
1109
}
1106
 
1110
 
1107
static int radeon_pm_init_old(struct radeon_device *rdev)
1111
static int radeon_pm_init_old(struct radeon_device *rdev)
1108
{
1112
{
1109
	int ret;
1113
	int ret;
1110
 
1114
 
1111
	rdev->pm.profile = PM_PROFILE_DEFAULT;
1115
	rdev->pm.profile = PM_PROFILE_DEFAULT;
1112
	rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
1116
	rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
1113
	rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
1117
	rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
1114
	rdev->pm.dynpm_can_upclock = true;
1118
	rdev->pm.dynpm_can_upclock = true;
1115
	rdev->pm.dynpm_can_downclock = true;
1119
	rdev->pm.dynpm_can_downclock = true;
1116
	rdev->pm.default_sclk = rdev->clock.default_sclk;
1120
	rdev->pm.default_sclk = rdev->clock.default_sclk;
1117
	rdev->pm.default_mclk = rdev->clock.default_mclk;
1121
	rdev->pm.default_mclk = rdev->clock.default_mclk;
1118
	rdev->pm.current_sclk = rdev->clock.default_sclk;
1122
	rdev->pm.current_sclk = rdev->clock.default_sclk;
1119
	rdev->pm.current_mclk = rdev->clock.default_mclk;
1123
	rdev->pm.current_mclk = rdev->clock.default_mclk;
1120
	rdev->pm.int_thermal_type = THERMAL_TYPE_NONE;
1124
	rdev->pm.int_thermal_type = THERMAL_TYPE_NONE;
1121
 
1125
 
1122
	if (rdev->bios) {
1126
	if (rdev->bios) {
1123
		if (rdev->is_atom_bios)
1127
		if (rdev->is_atom_bios)
1124
			radeon_atombios_get_power_modes(rdev);
1128
			radeon_atombios_get_power_modes(rdev);
1125
		else
1129
		else
1126
			radeon_combios_get_power_modes(rdev);
1130
			radeon_combios_get_power_modes(rdev);
1127
		radeon_pm_print_states(rdev);
1131
		radeon_pm_print_states(rdev);
1128
		radeon_pm_init_profile(rdev);
1132
		radeon_pm_init_profile(rdev);
1129
		/* set up the default clocks if the MC ucode is loaded */
1133
		/* set up the default clocks if the MC ucode is loaded */
1130
		if ((rdev->family >= CHIP_BARTS) &&
1134
		if ((rdev->family >= CHIP_BARTS) &&
1131
		    (rdev->family <= CHIP_CAYMAN) &&
1135
		    (rdev->family <= CHIP_CAYMAN) &&
1132
		    rdev->mc_fw) {
1136
		    rdev->mc_fw) {
1133
			if (rdev->pm.default_vddc)
1137
			if (rdev->pm.default_vddc)
1134
				radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
1138
				radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
1135
							SET_VOLTAGE_TYPE_ASIC_VDDC);
1139
							SET_VOLTAGE_TYPE_ASIC_VDDC);
1136
			if (rdev->pm.default_vddci)
1140
			if (rdev->pm.default_vddci)
1137
				radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
1141
				radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
1138
							SET_VOLTAGE_TYPE_ASIC_VDDCI);
1142
							SET_VOLTAGE_TYPE_ASIC_VDDCI);
1139
			if (rdev->pm.default_sclk)
1143
			if (rdev->pm.default_sclk)
1140
				radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
1144
				radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
1141
			if (rdev->pm.default_mclk)
1145
			if (rdev->pm.default_mclk)
1142
				radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
1146
				radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
1143
		}
1147
		}
1144
	}
1148
	}
1145
 
1149
 
1146
	/* set up the internal thermal sensor if applicable */
1150
	/* set up the internal thermal sensor if applicable */
1147
	ret = radeon_hwmon_init(rdev);
1151
	ret = radeon_hwmon_init(rdev);
1148
	if (ret)
1152
	if (ret)
1149
		return ret;
1153
		return ret;
1150
 
1154
 
1151
//	INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler);
1155
//	INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler);
1152
 
1156
 
1153
	if (rdev->pm.num_power_states > 1) {
1157
	if (rdev->pm.num_power_states > 1) {
1154
		/* where's the best place to put these? */
1158
		/* where's the best place to put these? */
1155
 
1159
 
1156
 
1160
 
1157
		DRM_INFO("radeon: power management initialized\n");
1161
		DRM_INFO("radeon: power management initialized\n");
1158
	}
1162
	}
1159
 
1163
 
1160
	return 0;
1164
	return 0;
1161
}
1165
}
1162
 
1166
 
1163
static void radeon_dpm_print_power_states(struct radeon_device *rdev)
1167
static void radeon_dpm_print_power_states(struct radeon_device *rdev)
1164
{
1168
{
1165
	int i;
1169
	int i;
1166
 
1170
 
1167
	for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
1171
	for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
1168
		printk("== power state %d ==\n", i);
1172
		printk("== power state %d ==\n", i);
1169
		radeon_dpm_print_power_state(rdev, &rdev->pm.dpm.ps[i]);
1173
		radeon_dpm_print_power_state(rdev, &rdev->pm.dpm.ps[i]);
1170
	}
1174
	}
1171
}
1175
}
1172
 
1176
 
1173
static int radeon_pm_init_dpm(struct radeon_device *rdev)
1177
static int radeon_pm_init_dpm(struct radeon_device *rdev)
1174
{
1178
{
1175
	int ret;
1179
	int ret;
1176
 
1180
 
1177
	/* default to balanced state */
1181
	/* default to balanced state */
1178
	rdev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
1182
	rdev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
1179
	rdev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
1183
	rdev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
1180
	rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
1184
	rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
1181
	rdev->pm.default_sclk = rdev->clock.default_sclk;
1185
	rdev->pm.default_sclk = rdev->clock.default_sclk;
1182
	rdev->pm.default_mclk = rdev->clock.default_mclk;
1186
	rdev->pm.default_mclk = rdev->clock.default_mclk;
1183
	rdev->pm.current_sclk = rdev->clock.default_sclk;
1187
	rdev->pm.current_sclk = rdev->clock.default_sclk;
1184
	rdev->pm.current_mclk = rdev->clock.default_mclk;
1188
	rdev->pm.current_mclk = rdev->clock.default_mclk;
1185
	rdev->pm.int_thermal_type = THERMAL_TYPE_NONE;
1189
	rdev->pm.int_thermal_type = THERMAL_TYPE_NONE;
1186
 
1190
 
1187
	if (rdev->bios && rdev->is_atom_bios)
1191
	if (rdev->bios && rdev->is_atom_bios)
1188
		radeon_atombios_get_power_modes(rdev);
1192
		radeon_atombios_get_power_modes(rdev);
1189
	else
1193
	else
1190
		return -EINVAL;
1194
		return -EINVAL;
1191
 
1195
 
1192
	/* set up the internal thermal sensor if applicable */
1196
	/* set up the internal thermal sensor if applicable */
1193
	ret = radeon_hwmon_init(rdev);
1197
	ret = radeon_hwmon_init(rdev);
1194
	if (ret)
1198
	if (ret)
1195
		return ret;
1199
		return ret;
1196
 
1200
 
1197
	INIT_WORK(&rdev->pm.dpm.thermal.work, radeon_dpm_thermal_work_handler);
1201
	INIT_WORK(&rdev->pm.dpm.thermal.work, radeon_dpm_thermal_work_handler);
1198
	mutex_lock(&rdev->pm.mutex);
1202
	mutex_lock(&rdev->pm.mutex);
1199
	radeon_dpm_init(rdev);
1203
	radeon_dpm_init(rdev);
1200
	rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
1204
	rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
1201
	if (radeon_dpm == 1)
1205
	if (radeon_dpm == 1)
1202
		radeon_dpm_print_power_states(rdev);
1206
		radeon_dpm_print_power_states(rdev);
1203
	radeon_dpm_setup_asic(rdev);
1207
	radeon_dpm_setup_asic(rdev);
1204
	ret = radeon_dpm_enable(rdev);
1208
	ret = radeon_dpm_enable(rdev);
1205
	mutex_unlock(&rdev->pm.mutex);
1209
	mutex_unlock(&rdev->pm.mutex);
1206
	if (ret)
1210
	if (ret)
1207
		goto dpm_failed;
1211
		goto dpm_failed;
1208
	rdev->pm.dpm_enabled = true;
1212
	rdev->pm.dpm_enabled = true;
1209
 
1213
 
1210
	DRM_INFO("radeon: dpm initialized\n");
1214
	DRM_INFO("radeon: dpm initialized\n");
1211
 
1215
 
1212
	return 0;
1216
	return 0;
1213
 
1217
 
1214
dpm_failed:
1218
dpm_failed:
1215
	rdev->pm.dpm_enabled = false;
1219
	rdev->pm.dpm_enabled = false;
1216
	if ((rdev->family >= CHIP_BARTS) &&
1220
	if ((rdev->family >= CHIP_BARTS) &&
1217
	    (rdev->family <= CHIP_CAYMAN) &&
1221
	    (rdev->family <= CHIP_CAYMAN) &&
1218
	    rdev->mc_fw) {
1222
	    rdev->mc_fw) {
1219
		if (rdev->pm.default_vddc)
1223
		if (rdev->pm.default_vddc)
1220
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
1224
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
1221
						SET_VOLTAGE_TYPE_ASIC_VDDC);
1225
						SET_VOLTAGE_TYPE_ASIC_VDDC);
1222
		if (rdev->pm.default_vddci)
1226
		if (rdev->pm.default_vddci)
1223
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
1227
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
1224
						SET_VOLTAGE_TYPE_ASIC_VDDCI);
1228
						SET_VOLTAGE_TYPE_ASIC_VDDCI);
1225
		if (rdev->pm.default_sclk)
1229
		if (rdev->pm.default_sclk)
1226
			radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
1230
			radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
1227
		if (rdev->pm.default_mclk)
1231
		if (rdev->pm.default_mclk)
1228
			radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
1232
			radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
1229
	}
1233
	}
1230
	DRM_ERROR("radeon: dpm initialization failed\n");
1234
	DRM_ERROR("radeon: dpm initialization failed\n");
1231
	return ret;
1235
	return ret;
1232
}
1236
}
1233
 
1237
 
1234
struct radeon_dpm_quirk {
1238
struct radeon_dpm_quirk {
1235
	u32 chip_vendor;
1239
	u32 chip_vendor;
1236
	u32 chip_device;
1240
	u32 chip_device;
1237
	u32 subsys_vendor;
1241
	u32 subsys_vendor;
1238
	u32 subsys_device;
1242
	u32 subsys_device;
1239
};
1243
};
1240
 
1244
 
1241
/* cards with dpm stability problems */
1245
/* cards with dpm stability problems */
1242
static struct radeon_dpm_quirk radeon_dpm_quirk_list[] = {
1246
static struct radeon_dpm_quirk radeon_dpm_quirk_list[] = {
1243
	/* TURKS - https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1386534 */
1247
	/* TURKS - https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1386534 */
1244
	{ PCI_VENDOR_ID_ATI, 0x6759, 0x1682, 0x3195 },
1248
	{ PCI_VENDOR_ID_ATI, 0x6759, 0x1682, 0x3195 },
1245
	/* TURKS - https://bugzilla.kernel.org/show_bug.cgi?id=83731 */
1249
	/* TURKS - https://bugzilla.kernel.org/show_bug.cgi?id=83731 */
1246
	{ PCI_VENDOR_ID_ATI, 0x6840, 0x1179, 0xfb81 },
1250
	{ PCI_VENDOR_ID_ATI, 0x6840, 0x1179, 0xfb81 },
1247
	{ 0, 0, 0, 0 },
1251
	{ 0, 0, 0, 0 },
1248
};
1252
};
1249
 
1253
 
1250
int radeon_pm_init(struct radeon_device *rdev)
1254
int radeon_pm_init(struct radeon_device *rdev)
1251
{
1255
{
1252
	struct radeon_dpm_quirk *p = radeon_dpm_quirk_list;
1256
	struct radeon_dpm_quirk *p = radeon_dpm_quirk_list;
1253
	bool disable_dpm = false;
1257
	bool disable_dpm = false;
1254
 
1258
 
1255
	/* Apply dpm quirks */
1259
	/* Apply dpm quirks */
1256
	while (p && p->chip_device != 0) {
1260
	while (p && p->chip_device != 0) {
1257
		if (rdev->pdev->vendor == p->chip_vendor &&
1261
		if (rdev->pdev->vendor == p->chip_vendor &&
1258
		    rdev->pdev->device == p->chip_device &&
1262
		    rdev->pdev->device == p->chip_device &&
1259
		    rdev->pdev->subsystem_vendor == p->subsys_vendor &&
1263
		    rdev->pdev->subsystem_vendor == p->subsys_vendor &&
1260
		    rdev->pdev->subsystem_device == p->subsys_device) {
1264
		    rdev->pdev->subsystem_device == p->subsys_device) {
1261
			disable_dpm = true;
1265
			disable_dpm = true;
1262
			break;
1266
			break;
1263
		}
1267
		}
1264
		++p;
1268
		++p;
1265
	}
1269
	}
1266
 
1270
 
1267
	/* enable dpm on rv6xx+ */
1271
	/* enable dpm on rv6xx+ */
1268
	switch (rdev->family) {
1272
	switch (rdev->family) {
1269
	case CHIP_RV610:
1273
	case CHIP_RV610:
1270
	case CHIP_RV630:
1274
	case CHIP_RV630:
1271
	case CHIP_RV620:
1275
	case CHIP_RV620:
1272
	case CHIP_RV635:
1276
	case CHIP_RV635:
1273
	case CHIP_RV670:
1277
	case CHIP_RV670:
1274
	case CHIP_RS780:
1278
	case CHIP_RS780:
1275
	case CHIP_RS880:
1279
	case CHIP_RS880:
1276
	case CHIP_RV770:
1280
	case CHIP_RV770:
1277
		/* DPM requires the RLC, RV770+ dGPU requires SMC */
1281
		/* DPM requires the RLC, RV770+ dGPU requires SMC */
1278
		if (!rdev->rlc_fw)
1282
		if (!rdev->rlc_fw)
1279
			rdev->pm.pm_method = PM_METHOD_PROFILE;
1283
			rdev->pm.pm_method = PM_METHOD_PROFILE;
1280
		else if ((rdev->family >= CHIP_RV770) &&
1284
		else if ((rdev->family >= CHIP_RV770) &&
1281
			 (!(rdev->flags & RADEON_IS_IGP)) &&
1285
			 (!(rdev->flags & RADEON_IS_IGP)) &&
1282
			 (!rdev->smc_fw))
1286
			 (!rdev->smc_fw))
1283
			rdev->pm.pm_method = PM_METHOD_PROFILE;
1287
			rdev->pm.pm_method = PM_METHOD_PROFILE;
1284
		else if (radeon_dpm == 1)
1288
		else if (radeon_dpm == 1)
1285
			rdev->pm.pm_method = PM_METHOD_DPM;
1289
			rdev->pm.pm_method = PM_METHOD_DPM;
1286
		else
1290
		else
1287
			rdev->pm.pm_method = PM_METHOD_PROFILE;
1291
			rdev->pm.pm_method = PM_METHOD_PROFILE;
1288
		break;
1292
		break;
1289
	case CHIP_RV730:
1293
	case CHIP_RV730:
1290
	case CHIP_RV710:
1294
	case CHIP_RV710:
1291
	case CHIP_RV740:
1295
	case CHIP_RV740:
1292
	case CHIP_CEDAR:
1296
	case CHIP_CEDAR:
1293
	case CHIP_REDWOOD:
1297
	case CHIP_REDWOOD:
1294
	case CHIP_JUNIPER:
1298
	case CHIP_JUNIPER:
1295
	case CHIP_CYPRESS:
1299
	case CHIP_CYPRESS:
1296
	case CHIP_HEMLOCK:
1300
	case CHIP_HEMLOCK:
1297
	case CHIP_PALM:
1301
	case CHIP_PALM:
1298
	case CHIP_SUMO:
1302
	case CHIP_SUMO:
1299
	case CHIP_SUMO2:
1303
	case CHIP_SUMO2:
1300
	case CHIP_BARTS:
1304
	case CHIP_BARTS:
1301
	case CHIP_TURKS:
1305
	case CHIP_TURKS:
1302
	case CHIP_CAICOS:
1306
	case CHIP_CAICOS:
1303
	case CHIP_CAYMAN:
1307
	case CHIP_CAYMAN:
1304
	case CHIP_ARUBA:
1308
	case CHIP_ARUBA:
1305
	case CHIP_TAHITI:
1309
	case CHIP_TAHITI:
1306
	case CHIP_PITCAIRN:
1310
	case CHIP_PITCAIRN:
1307
	case CHIP_VERDE:
1311
	case CHIP_VERDE:
1308
	case CHIP_OLAND:
1312
	case CHIP_OLAND:
1309
	case CHIP_HAINAN:
1313
	case CHIP_HAINAN:
1310
	case CHIP_BONAIRE:
1314
	case CHIP_BONAIRE:
1311
	case CHIP_KABINI:
1315
	case CHIP_KABINI:
1312
	case CHIP_KAVERI:
1316
	case CHIP_KAVERI:
1313
	case CHIP_HAWAII:
1317
	case CHIP_HAWAII:
1314
	case CHIP_MULLINS:
1318
	case CHIP_MULLINS:
1315
		/* DPM requires the RLC, RV770+ dGPU requires SMC */
1319
		/* DPM requires the RLC, RV770+ dGPU requires SMC */
1316
		if (!rdev->rlc_fw)
1320
		if (!rdev->rlc_fw)
1317
			rdev->pm.pm_method = PM_METHOD_PROFILE;
1321
			rdev->pm.pm_method = PM_METHOD_PROFILE;
1318
		else if ((rdev->family >= CHIP_RV770) &&
1322
		else if ((rdev->family >= CHIP_RV770) &&
1319
			 (!(rdev->flags & RADEON_IS_IGP)) &&
1323
			 (!(rdev->flags & RADEON_IS_IGP)) &&
1320
			 (!rdev->smc_fw))
1324
			 (!rdev->smc_fw))
1321
			rdev->pm.pm_method = PM_METHOD_PROFILE;
1325
			rdev->pm.pm_method = PM_METHOD_PROFILE;
1322
		else if (disable_dpm && (radeon_dpm == -1))
1326
		else if (disable_dpm && (radeon_dpm == -1))
1323
			rdev->pm.pm_method = PM_METHOD_PROFILE;
1327
			rdev->pm.pm_method = PM_METHOD_PROFILE;
1324
		else if (radeon_dpm == 0)
1328
		else if (radeon_dpm == 0)
1325
			rdev->pm.pm_method = PM_METHOD_PROFILE;
1329
			rdev->pm.pm_method = PM_METHOD_PROFILE;
1326
		else
1330
		else
1327
			rdev->pm.pm_method = PM_METHOD_DPM;
1331
			rdev->pm.pm_method = PM_METHOD_DPM;
1328
		break;
1332
		break;
1329
	default:
1333
	default:
1330
		/* default to profile method */
1334
		/* default to profile method */
1331
		rdev->pm.pm_method = PM_METHOD_PROFILE;
1335
		rdev->pm.pm_method = PM_METHOD_PROFILE;
1332
		break;
1336
		break;
1333
	}
1337
	}
1334
 
1338
 
1335
	if (rdev->pm.pm_method == PM_METHOD_DPM)
1339
	if (rdev->pm.pm_method == PM_METHOD_DPM)
1336
		return radeon_pm_init_dpm(rdev);
1340
		return radeon_pm_init_dpm(rdev);
1337
	else
1341
	else
1338
		return radeon_pm_init_old(rdev);
1342
		return radeon_pm_init_old(rdev);
1339
}
1343
}
1340
 
1344
 
1341
int radeon_pm_late_init(struct radeon_device *rdev)
1345
int radeon_pm_late_init(struct radeon_device *rdev)
1342
{
1346
{
1343
	int ret = 0;
1347
	int ret = 0;
1344
 
1348
 
1345
	if (rdev->pm.pm_method == PM_METHOD_DPM) {
1349
	if (rdev->pm.pm_method == PM_METHOD_DPM) {
1346
		mutex_lock(&rdev->pm.mutex);
1350
		mutex_lock(&rdev->pm.mutex);
1347
		ret = radeon_dpm_late_enable(rdev);
1351
		ret = radeon_dpm_late_enable(rdev);
1348
		mutex_unlock(&rdev->pm.mutex);
1352
		mutex_unlock(&rdev->pm.mutex);
1349
	}
1353
	}
1350
	return ret;
1354
	return ret;
1351
}
1355
}
1352
 
1356
 
1353
static void radeon_pm_fini_old(struct radeon_device *rdev)
1357
static void radeon_pm_fini_old(struct radeon_device *rdev)
1354
{
1358
{
1355
	if (rdev->pm.num_power_states > 1) {
1359
	if (rdev->pm.num_power_states > 1) {
1356
		mutex_lock(&rdev->pm.mutex);
1360
		mutex_lock(&rdev->pm.mutex);
1357
		if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
1361
		if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
1358
			rdev->pm.profile = PM_PROFILE_DEFAULT;
1362
			rdev->pm.profile = PM_PROFILE_DEFAULT;
1359
			radeon_pm_update_profile(rdev);
1363
			radeon_pm_update_profile(rdev);
1360
			radeon_pm_set_clocks(rdev);
1364
			radeon_pm_set_clocks(rdev);
1361
		} else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
1365
		} else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
1362
			/* reset default clocks */
1366
			/* reset default clocks */
1363
			rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
1367
			rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
1364
			rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
1368
			rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
1365
			radeon_pm_set_clocks(rdev);
1369
			radeon_pm_set_clocks(rdev);
1366
		}
1370
		}
1367
		mutex_unlock(&rdev->pm.mutex);
1371
		mutex_unlock(&rdev->pm.mutex);
1368
 
1372
 
1369
//		cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
1373
//		cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
1370
 
1374
 
1371
   }
1375
   }
1372
 
1376
 
1373
	radeon_hwmon_fini(rdev);
1377
	radeon_hwmon_fini(rdev);
1374
	kfree(rdev->pm.power_state);
1378
	kfree(rdev->pm.power_state);
1375
}
1379
}
1376
 
1380
 
1377
static void radeon_pm_fini_dpm(struct radeon_device *rdev)
1381
static void radeon_pm_fini_dpm(struct radeon_device *rdev)
1378
{
1382
{
1379
	if (rdev->pm.num_power_states > 1) {
1383
	if (rdev->pm.num_power_states > 1) {
1380
		mutex_lock(&rdev->pm.mutex);
1384
		mutex_lock(&rdev->pm.mutex);
1381
		radeon_dpm_disable(rdev);
1385
		radeon_dpm_disable(rdev);
1382
		mutex_unlock(&rdev->pm.mutex);
1386
		mutex_unlock(&rdev->pm.mutex);
1383
	}
1387
	}
1384
	radeon_dpm_fini(rdev);
1388
	radeon_dpm_fini(rdev);
1385
 
1389
 
1386
	radeon_hwmon_fini(rdev);
1390
	radeon_hwmon_fini(rdev);
1387
	kfree(rdev->pm.power_state);
1391
	kfree(rdev->pm.power_state);
1388
}
1392
}
1389
 
1393
 
1390
void radeon_pm_fini(struct radeon_device *rdev)
1394
void radeon_pm_fini(struct radeon_device *rdev)
1391
{
1395
{
1392
	if (rdev->pm.pm_method == PM_METHOD_DPM)
1396
	if (rdev->pm.pm_method == PM_METHOD_DPM)
1393
		radeon_pm_fini_dpm(rdev);
1397
		radeon_pm_fini_dpm(rdev);
1394
	else
1398
	else
1395
		radeon_pm_fini_old(rdev);
1399
		radeon_pm_fini_old(rdev);
1396
}
1400
}
1397
 
1401
 
1398
static void radeon_pm_compute_clocks_old(struct radeon_device *rdev)
1402
static void radeon_pm_compute_clocks_old(struct radeon_device *rdev)
1399
{
1403
{
1400
	struct drm_device *ddev = rdev->ddev;
1404
	struct drm_device *ddev = rdev->ddev;
1401
	struct drm_crtc *crtc;
1405
	struct drm_crtc *crtc;
1402
	struct radeon_crtc *radeon_crtc;
1406
	struct radeon_crtc *radeon_crtc;
1403
 
1407
 
1404
	if (rdev->pm.num_power_states < 2)
1408
	if (rdev->pm.num_power_states < 2)
1405
		return;
1409
		return;
1406
 
1410
 
1407
	mutex_lock(&rdev->pm.mutex);
1411
	mutex_lock(&rdev->pm.mutex);
1408
 
1412
 
1409
	rdev->pm.active_crtcs = 0;
1413
	rdev->pm.active_crtcs = 0;
1410
	rdev->pm.active_crtc_count = 0;
1414
	rdev->pm.active_crtc_count = 0;
1411
	if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
1415
	if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
1412
		list_for_each_entry(crtc,
1416
		list_for_each_entry(crtc,
1413
				    &ddev->mode_config.crtc_list, head) {
1417
				    &ddev->mode_config.crtc_list, head) {
1414
			radeon_crtc = to_radeon_crtc(crtc);
1418
			radeon_crtc = to_radeon_crtc(crtc);
1415
			if (radeon_crtc->enabled) {
1419
			if (radeon_crtc->enabled) {
1416
				rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
1420
				rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
1417
				rdev->pm.active_crtc_count++;
1421
				rdev->pm.active_crtc_count++;
1418
			}
1422
			}
1419
		}
1423
		}
1420
	}
1424
	}
1421
 
1425
 
1422
	if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
1426
	if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
1423
		radeon_pm_update_profile(rdev);
1427
		radeon_pm_update_profile(rdev);
1424
		radeon_pm_set_clocks(rdev);
1428
		radeon_pm_set_clocks(rdev);
1425
	} else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
1429
	} else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
1426
		if (rdev->pm.dynpm_state != DYNPM_STATE_DISABLED) {
1430
		if (rdev->pm.dynpm_state != DYNPM_STATE_DISABLED) {
1427
			if (rdev->pm.active_crtc_count > 1) {
1431
			if (rdev->pm.active_crtc_count > 1) {
1428
				if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
1432
				if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
1429
//                   cancel_delayed_work(&rdev->pm.dynpm_idle_work);
1433
//                   cancel_delayed_work(&rdev->pm.dynpm_idle_work);
1430
 
1434
 
1431
					rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
1435
					rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
1432
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
1436
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
1433
					radeon_pm_get_dynpm_state(rdev);
1437
					radeon_pm_get_dynpm_state(rdev);
1434
					radeon_pm_set_clocks(rdev);
1438
					radeon_pm_set_clocks(rdev);
1435
 
1439
 
1436
					DRM_DEBUG_DRIVER("radeon: dynamic power management deactivated\n");
1440
					DRM_DEBUG_DRIVER("radeon: dynamic power management deactivated\n");
1437
				}
1441
				}
1438
			} else if (rdev->pm.active_crtc_count == 1) {
1442
			} else if (rdev->pm.active_crtc_count == 1) {
1439
				/* TODO: Increase clocks if needed for current mode */
1443
				/* TODO: Increase clocks if needed for current mode */
1440
 
1444
 
1441
				if (rdev->pm.dynpm_state == DYNPM_STATE_MINIMUM) {
1445
				if (rdev->pm.dynpm_state == DYNPM_STATE_MINIMUM) {
1442
					rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
1446
					rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
1443
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_UPCLOCK;
1447
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_UPCLOCK;
1444
					radeon_pm_get_dynpm_state(rdev);
1448
					radeon_pm_get_dynpm_state(rdev);
1445
					radeon_pm_set_clocks(rdev);
1449
					radeon_pm_set_clocks(rdev);
1446
 
1450
 
1447
//					schedule_delayed_work(&rdev->pm.dynpm_idle_work,
1451
//					schedule_delayed_work(&rdev->pm.dynpm_idle_work,
1448
//							   msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
1452
//							   msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
1449
				} else if (rdev->pm.dynpm_state == DYNPM_STATE_PAUSED) {
1453
				} else if (rdev->pm.dynpm_state == DYNPM_STATE_PAUSED) {
1450
					rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
1454
					rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
1451
//					schedule_delayed_work(&rdev->pm.dynpm_idle_work,
1455
//					schedule_delayed_work(&rdev->pm.dynpm_idle_work,
1452
//							   msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
1456
//							   msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
1453
					DRM_DEBUG_DRIVER("radeon: dynamic power management activated\n");
1457
					DRM_DEBUG_DRIVER("radeon: dynamic power management activated\n");
1454
				}
1458
				}
1455
			} else { /* count == 0 */
1459
			} else { /* count == 0 */
1456
				if (rdev->pm.dynpm_state != DYNPM_STATE_MINIMUM) {
1460
				if (rdev->pm.dynpm_state != DYNPM_STATE_MINIMUM) {
1457
//					cancel_delayed_work(&rdev->pm.dynpm_idle_work);
1461
//					cancel_delayed_work(&rdev->pm.dynpm_idle_work);
1458
 
1462
 
1459
					rdev->pm.dynpm_state = DYNPM_STATE_MINIMUM;
1463
					rdev->pm.dynpm_state = DYNPM_STATE_MINIMUM;
1460
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_MINIMUM;
1464
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_MINIMUM;
1461
					radeon_pm_get_dynpm_state(rdev);
1465
					radeon_pm_get_dynpm_state(rdev);
1462
					radeon_pm_set_clocks(rdev);
1466
					radeon_pm_set_clocks(rdev);
1463
				}
1467
				}
1464
			}
1468
			}
1465
		}
1469
		}
1466
	}
1470
	}
1467
 
1471
 
1468
	mutex_unlock(&rdev->pm.mutex);
1472
	mutex_unlock(&rdev->pm.mutex);
1469
}
1473
}
1470
 
1474
 
1471
static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
1475
static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
1472
{
1476
{
1473
	struct drm_device *ddev = rdev->ddev;
1477
	struct drm_device *ddev = rdev->ddev;
1474
	struct drm_crtc *crtc;
1478
	struct drm_crtc *crtc;
1475
	struct radeon_crtc *radeon_crtc;
1479
	struct radeon_crtc *radeon_crtc;
1476
 
1480
 
1477
	if (!rdev->pm.dpm_enabled)
1481
	if (!rdev->pm.dpm_enabled)
1478
		return;
1482
		return;
1479
 
1483
 
1480
	mutex_lock(&rdev->pm.mutex);
1484
	mutex_lock(&rdev->pm.mutex);
1481
 
1485
 
1482
	/* update active crtc counts */
1486
	/* update active crtc counts */
1483
	rdev->pm.dpm.new_active_crtcs = 0;
1487
	rdev->pm.dpm.new_active_crtcs = 0;
1484
	rdev->pm.dpm.new_active_crtc_count = 0;
1488
	rdev->pm.dpm.new_active_crtc_count = 0;
1485
	if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
1489
	if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
1486
		list_for_each_entry(crtc,
1490
		list_for_each_entry(crtc,
1487
				    &ddev->mode_config.crtc_list, head) {
1491
				    &ddev->mode_config.crtc_list, head) {
1488
			radeon_crtc = to_radeon_crtc(crtc);
1492
			radeon_crtc = to_radeon_crtc(crtc);
1489
			if (crtc->enabled) {
1493
			if (crtc->enabled) {
1490
				rdev->pm.dpm.new_active_crtcs |= (1 << radeon_crtc->crtc_id);
1494
				rdev->pm.dpm.new_active_crtcs |= (1 << radeon_crtc->crtc_id);
1491
				rdev->pm.dpm.new_active_crtc_count++;
1495
				rdev->pm.dpm.new_active_crtc_count++;
1492
			}
1496
			}
1493
		}
1497
		}
1494
	}
1498
	}
1495
 
1499
 
1496
	/* update battery/ac status */
1500
	/* update battery/ac status */
1497
	if (power_supply_is_system_supplied() > 0)
1501
	if (power_supply_is_system_supplied() > 0)
1498
		rdev->pm.dpm.ac_power = true;
1502
		rdev->pm.dpm.ac_power = true;
1499
	else
1503
	else
1500
		rdev->pm.dpm.ac_power = false;
1504
		rdev->pm.dpm.ac_power = false;
1501
 
1505
 
1502
	radeon_dpm_change_power_state_locked(rdev);
1506
	radeon_dpm_change_power_state_locked(rdev);
1503
 
1507
 
1504
	mutex_unlock(&rdev->pm.mutex);
1508
	mutex_unlock(&rdev->pm.mutex);
1505
 
1509
 
1506
}
1510
}
1507
 
1511
 
1508
void radeon_pm_compute_clocks(struct radeon_device *rdev)
1512
void radeon_pm_compute_clocks(struct radeon_device *rdev)
1509
{
1513
{
1510
	if (rdev->pm.pm_method == PM_METHOD_DPM)
1514
	if (rdev->pm.pm_method == PM_METHOD_DPM)
1511
		radeon_pm_compute_clocks_dpm(rdev);
1515
		radeon_pm_compute_clocks_dpm(rdev);
1512
	else
1516
	else
1513
		radeon_pm_compute_clocks_old(rdev);
1517
		radeon_pm_compute_clocks_old(rdev);
1514
}
1518
}
1515
 
1519
 
1516
static bool radeon_pm_in_vbl(struct radeon_device *rdev)
1520
static bool radeon_pm_in_vbl(struct radeon_device *rdev)
1517
{
1521
{
1518
	int  crtc, vpos, hpos, vbl_status;
1522
	int  crtc, vpos, hpos, vbl_status;
1519
	bool in_vbl = true;
1523
	bool in_vbl = true;
1520
 
1524
 
1521
	/* Iterate over all active crtc's. All crtc's must be in vblank,
1525
	/* Iterate over all active crtc's. All crtc's must be in vblank,
1522
	 * otherwise return in_vbl == false.
1526
	 * otherwise return in_vbl == false.
1523
	 */
1527
	 */
1524
	for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) {
1528
	for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) {
1525
		if (rdev->pm.active_crtcs & (1 << crtc)) {
1529
		if (rdev->pm.active_crtcs & (1 << crtc)) {
1526
			vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev,
1530
			vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev,
1527
								crtc,
1531
								crtc,
1528
								USE_REAL_VBLANKSTART,
1532
								USE_REAL_VBLANKSTART,
1529
								&vpos, &hpos, NULL, NULL,
1533
								&vpos, &hpos, NULL, NULL,
1530
								&rdev->mode_info.crtcs[crtc]->base.hwmode);
1534
								&rdev->mode_info.crtcs[crtc]->base.hwmode);
1531
			if ((vbl_status & DRM_SCANOUTPOS_VALID) &&
1535
			if ((vbl_status & DRM_SCANOUTPOS_VALID) &&
1532
			    !(vbl_status & DRM_SCANOUTPOS_IN_VBLANK))
1536
			    !(vbl_status & DRM_SCANOUTPOS_IN_VBLANK))
1533
				in_vbl = false;
1537
				in_vbl = false;
1534
		}
1538
		}
1535
	}
1539
	}
1536
 
1540
 
1537
	return in_vbl;
1541
	return in_vbl;
1538
}
1542
}
1539
 
1543
 
1540
/*
 * Debug helper around radeon_pm_in_vbl(): logs when a power-management
 * change happens outside vblank.  @finish distinguishes the check done
 * on exit from the one done on entry of the reclock sequence.
 *
 * NOTE(review): stat_crtc is never written, so the logged %08x value is
 * always 0 — kept as-is to match the original (and upstream) output.
 */
static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish)
{
	u32 stat_crtc = 0;
	bool in_vbl = radeon_pm_in_vbl(rdev);

	if (!in_vbl)
		DRM_DEBUG_DRIVER("not in vbl for pm change %08x at %s\n",
				 stat_crtc, finish ? "exit" : "entry");
	return in_vbl;
}
1550
 
1554
 
1551
 
1555
 
1552
/*
1556
/*
1553
 * Debugfs info
1557
 * Debugfs info
1554
 */
1558
 */
1555
#if defined(CONFIG_DEBUG_FS)
1559
#if defined(CONFIG_DEBUG_FS)
1556
 
1560
 
1557
static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
1561
static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
1558
{
1562
{
1559
	struct drm_info_node *node = (struct drm_info_node *) m->private;
1563
	struct drm_info_node *node = (struct drm_info_node *) m->private;
1560
	struct drm_device *dev = node->minor->dev;
1564
	struct drm_device *dev = node->minor->dev;
1561
	struct radeon_device *rdev = dev->dev_private;
1565
	struct radeon_device *rdev = dev->dev_private;
1562
	struct drm_device *ddev = rdev->ddev;
1566
	struct drm_device *ddev = rdev->ddev;
1563
 
1567
 
1564
	if  ((rdev->flags & RADEON_IS_PX) &&
1568
	if  ((rdev->flags & RADEON_IS_PX) &&
1565
	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
1569
	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
1566
		seq_printf(m, "PX asic powered off\n");
1570
		seq_printf(m, "PX asic powered off\n");
1567
	} else if (rdev->pm.dpm_enabled) {
1571
	} else if (rdev->pm.dpm_enabled) {
1568
		mutex_lock(&rdev->pm.mutex);
1572
		mutex_lock(&rdev->pm.mutex);
1569
		if (rdev->asic->dpm.debugfs_print_current_performance_level)
1573
		if (rdev->asic->dpm.debugfs_print_current_performance_level)
1570
			radeon_dpm_debugfs_print_current_performance_level(rdev, m);
1574
			radeon_dpm_debugfs_print_current_performance_level(rdev, m);
1571
		else
1575
		else
1572
			seq_printf(m, "Debugfs support not implemented for this asic\n");
1576
			seq_printf(m, "Debugfs support not implemented for this asic\n");
1573
		mutex_unlock(&rdev->pm.mutex);
1577
		mutex_unlock(&rdev->pm.mutex);
1574
	} else {
1578
	} else {
1575
		seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk);
1579
		seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk);
1576
		/* radeon_get_engine_clock is not reliable on APUs so just print the current clock */
1580
		/* radeon_get_engine_clock is not reliable on APUs so just print the current clock */
1577
		if ((rdev->family >= CHIP_PALM) && (rdev->flags & RADEON_IS_IGP))
1581
		if ((rdev->family >= CHIP_PALM) && (rdev->flags & RADEON_IS_IGP))
1578
			seq_printf(m, "current engine clock: %u0 kHz\n", rdev->pm.current_sclk);
1582
			seq_printf(m, "current engine clock: %u0 kHz\n", rdev->pm.current_sclk);
1579
		else
1583
		else
1580
			seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
1584
			seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
1581
		seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk);
1585
		seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk);
1582
		if (rdev->asic->pm.get_memory_clock)
1586
		if (rdev->asic->pm.get_memory_clock)
1583
			seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
1587
			seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
1584
		if (rdev->pm.current_vddc)
1588
		if (rdev->pm.current_vddc)
1585
			seq_printf(m, "voltage: %u mV\n", rdev->pm.current_vddc);
1589
			seq_printf(m, "voltage: %u mV\n", rdev->pm.current_vddc);
1586
		if (rdev->asic->pm.get_pcie_lanes)
1590
		if (rdev->asic->pm.get_pcie_lanes)
1587
			seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev));
1591
			seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev));
1588
	}
1592
	}
1589
 
1593
 
1590
	return 0;
1594
	return 0;
1591
}
1595
}
1592
 
1596
 
1593
static struct drm_info_list radeon_pm_info_list[] = {
1597
static struct drm_info_list radeon_pm_info_list[] = {
1594
	{"radeon_pm_info", radeon_debugfs_pm_info, 0, NULL},
1598
	{"radeon_pm_info", radeon_debugfs_pm_info, 0, NULL},
1595
};
1599
};
1596
#endif
1600
#endif
1597
 
1601
 
1598
/*
 * Register the power-management debugfs files.  Compiles to a no-op
 * returning 0 when CONFIG_DEBUG_FS is not set.
 */
static int radeon_debugfs_pm_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_pm_info_list,
					ARRAY_SIZE(radeon_pm_info_list));
#else
	return 0;
#endif
}