Subversion Repositories Kolibri OS


Diff: Rev 3764 → Rev 5078
Line 35... Line 35...
 #include "radeon_asic.h"
 #include "radeon_mode.h"
 #include "r600d.h"
 #include "atom.h"
 #include "avivod.h"
-
-#define PFP_UCODE_SIZE 576
-#define PM4_UCODE_SIZE 1792
-#define RLC_UCODE_SIZE 768
-#define R700_PFP_UCODE_SIZE 848
-#define R700_PM4_UCODE_SIZE 1360
-#define R700_RLC_UCODE_SIZE 1024
-#define EVERGREEN_PFP_UCODE_SIZE 1120
-#define EVERGREEN_PM4_UCODE_SIZE 1376
-#define EVERGREEN_RLC_UCODE_SIZE 768
-#define CAYMAN_RLC_UCODE_SIZE 1024
-#define ARUBA_RLC_UCODE_SIZE 1536
+#include "radeon_ucode.h"
 
 /* Firmware Names */
 MODULE_FIRMWARE("radeon/R600_pfp.bin");
 MODULE_FIRMWARE("radeon/R600_me.bin");
Line 65... Line 54...
 MODULE_FIRMWARE("radeon/RV670_me.bin");
 MODULE_FIRMWARE("radeon/RS780_pfp.bin");
 MODULE_FIRMWARE("radeon/RS780_me.bin");
 MODULE_FIRMWARE("radeon/RV770_pfp.bin");
 MODULE_FIRMWARE("radeon/RV770_me.bin");
+MODULE_FIRMWARE("radeon/RV770_smc.bin");
 MODULE_FIRMWARE("radeon/RV730_pfp.bin");
 MODULE_FIRMWARE("radeon/RV730_me.bin");
+MODULE_FIRMWARE("radeon/RV730_smc.bin");
+MODULE_FIRMWARE("radeon/RV740_smc.bin");
 MODULE_FIRMWARE("radeon/RV710_pfp.bin");
 MODULE_FIRMWARE("radeon/RV710_me.bin");
+MODULE_FIRMWARE("radeon/RV710_smc.bin");
 MODULE_FIRMWARE("radeon/R600_rlc.bin");
 MODULE_FIRMWARE("radeon/R700_rlc.bin");
 MODULE_FIRMWARE("radeon/CEDAR_pfp.bin");
 MODULE_FIRMWARE("radeon/CEDAR_me.bin");
 MODULE_FIRMWARE("radeon/CEDAR_rlc.bin");
+MODULE_FIRMWARE("radeon/CEDAR_smc.bin");
 MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin");
 MODULE_FIRMWARE("radeon/REDWOOD_me.bin");
 MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin");
+MODULE_FIRMWARE("radeon/REDWOOD_smc.bin");
 MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin");
 MODULE_FIRMWARE("radeon/JUNIPER_me.bin");
 MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
+MODULE_FIRMWARE("radeon/JUNIPER_smc.bin");
 MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
 MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
 MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
+MODULE_FIRMWARE("radeon/CYPRESS_smc.bin");
 MODULE_FIRMWARE("radeon/PALM_pfp.bin");
 MODULE_FIRMWARE("radeon/PALM_me.bin");
 MODULE_FIRMWARE("radeon/SUMO_rlc.bin");
 MODULE_FIRMWARE("radeon/SUMO_pfp.bin");
 MODULE_FIRMWARE("radeon/SUMO_me.bin");
Line 105... Line 102...
 int r600_mc_wait_for_idle(struct radeon_device *rdev);
 static void r600_gpu_init(struct radeon_device *rdev);
 void r600_fini(struct radeon_device *rdev);
 void r600_irq_disable(struct radeon_device *rdev);
 static void r600_pcie_gen2_enable(struct radeon_device *rdev);
+extern int evergreen_rlc_resume(struct radeon_device *rdev);
+extern void rv770_set_clk_bypass_mode(struct radeon_device *rdev);
 
 /**
  * r600_get_xclk - get the xclk
  *
Line 119... Line 118...
 u32 r600_get_xclk(struct radeon_device *rdev)
 {
 	return rdev->clock.spll.reference_freq;
 }
+
+int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
+{
+	return 0;
+}
+
+void dce3_program_fmt(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+	int bpc = 0;
+	u32 tmp = 0;
+	enum radeon_connector_dither dither = RADEON_FMT_DITHER_DISABLE;
+
+	if (connector) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		bpc = radeon_get_monitor_bpc(connector);
+		dither = radeon_connector->dither;
+	}
+
+	/* LVDS FMT is set up by atom */
+	if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
+		return;
+
+	/* not needed for analog */
+	if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
+	    (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
+		return;
+
+	if (bpc == 0)
+		return;
+
+	switch (bpc) {
+	case 6:
+		if (dither == RADEON_FMT_DITHER_ENABLE)
+			/* XXX sort out optimal dither settings */
+			tmp |= FMT_SPATIAL_DITHER_EN;
+		else
+			tmp |= FMT_TRUNCATE_EN;
+		break;
+	case 8:
+		if (dither == RADEON_FMT_DITHER_ENABLE)
+			/* XXX sort out optimal dither settings */
+			tmp |= (FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH);
+		else
+			tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH);
+		break;
+	case 10:
+	default:
+		/* not needed */
+		break;
+	}
+
+	WREG32(FMT_BIT_DEPTH_CONTROL + radeon_crtc->crtc_offset, tmp);
+}
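The bpc switch in the added dce3_program_fmt() boils down to a small pure mapping: spatial dithering when the connector asks for it, truncation otherwise, plus an extra depth bit for 8-bpc sinks. A standalone sketch of just that mapping (the FMT_* bit values below are placeholders for illustration; the real encodings live in r600d.h):

    #include <stdio.h>
    #include <stdint.h>

    /* Placeholder bit positions; the real values come from r600d.h. */
    #define FMT_TRUNCATE_EN          (1 << 0)
    #define FMT_TRUNCATE_DEPTH       (1 << 4)
    #define FMT_SPATIAL_DITHER_EN    (1 << 8)
    #define FMT_SPATIAL_DITHER_DEPTH (1 << 12)

    /* Mirror of the bpc switch in dce3_program_fmt(): 6-bpc panels get
     * plain dither/truncate, 8-bpc panels add the depth bit, 10 bpc and
     * deeper need no reduction at all. */
    static uint32_t fmt_control_word(int bpc, int dither)
    {
            switch (bpc) {
            case 6:
                    return dither ? FMT_SPATIAL_DITHER_EN : FMT_TRUNCATE_EN;
            case 8:
                    return dither ? (FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH)
                                  : (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH);
            default: /* 10 bpc and up: not needed */
                    return 0;
            }
    }

    int main(void)
    {
            printf("6-bpc, dither on : 0x%08x\n", fmt_control_word(6, 1));
            printf("8-bpc, dither off: 0x%08x\n", fmt_control_word(8, 0));
            return 0;
    }

The computed word is what the driver writes to FMT_BIT_DEPTH_CONTROL for the CRTC; LVDS and analog outputs bail out earlier because their reduction is handled elsewhere.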
 
 /* get temperature in millidegrees */
 int rv6xx_get_temp(struct radeon_device *rdev)
 {
 	u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >>
Line 132... Line 189...
 		actual_temp -= 256;
 
 	return actual_temp * 1000;
-}
-
-
-
+}
+
+void r600_pm_get_dynpm_state(struct radeon_device *rdev)
+{
+	int i;
+
+	rdev->pm.dynpm_can_upclock = true;
+	rdev->pm.dynpm_can_downclock = true;
+
+	/* power state array is low to high, default is first */
+	if ((rdev->flags & RADEON_IS_IGP) || (rdev->family == CHIP_R600)) {
+		int min_power_state_index = 0;
+
+		if (rdev->pm.num_power_states > 2)
+			min_power_state_index = 1;
+
+		switch (rdev->pm.dynpm_planned_action) {
+		case DYNPM_ACTION_MINIMUM:
+			rdev->pm.requested_power_state_index = min_power_state_index;
+			rdev->pm.requested_clock_mode_index = 0;
+			rdev->pm.dynpm_can_downclock = false;
+			break;
+		case DYNPM_ACTION_DOWNCLOCK:
+			if (rdev->pm.current_power_state_index == min_power_state_index) {
+				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
+				rdev->pm.dynpm_can_downclock = false;
+			} else {
+				if (rdev->pm.active_crtc_count > 1) {
+					for (i = 0; i < rdev->pm.num_power_states; i++) {
+						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
+							continue;
+						else if (i >= rdev->pm.current_power_state_index) {
+							rdev->pm.requested_power_state_index =
+								rdev->pm.current_power_state_index;
+							break;
+						} else {
+							rdev->pm.requested_power_state_index = i;
+							break;
+						}
+					}
+				} else {
+					if (rdev->pm.current_power_state_index == 0)
+						rdev->pm.requested_power_state_index =
+							rdev->pm.num_power_states - 1;
+					else
+						rdev->pm.requested_power_state_index =
+							rdev->pm.current_power_state_index - 1;
+				}
+			}
+			rdev->pm.requested_clock_mode_index = 0;
+			/* don't use the power state if crtcs are active and no display flag is set */
+			if ((rdev->pm.active_crtc_count > 0) &&
+			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
+			     clock_info[rdev->pm.requested_clock_mode_index].flags &
+			     RADEON_PM_MODE_NO_DISPLAY)) {
+				rdev->pm.requested_power_state_index++;
+			}
+			break;
+		case DYNPM_ACTION_UPCLOCK:
+			if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
+				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
+				rdev->pm.dynpm_can_upclock = false;
+			} else {
+				if (rdev->pm.active_crtc_count > 1) {
+					for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
+						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
+							continue;
+						else if (i <= rdev->pm.current_power_state_index) {
+							rdev->pm.requested_power_state_index =
+								rdev->pm.current_power_state_index;
+							break;
+						} else {
+							rdev->pm.requested_power_state_index = i;
+							break;
+						}
+					}
+				} else
+					rdev->pm.requested_power_state_index =
+						rdev->pm.current_power_state_index + 1;
+			}
+			rdev->pm.requested_clock_mode_index = 0;
+			break;
+		case DYNPM_ACTION_DEFAULT:
+			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
+			rdev->pm.requested_clock_mode_index = 0;
+			rdev->pm.dynpm_can_upclock = false;
+			break;
+		case DYNPM_ACTION_NONE:
+		default:
+			DRM_ERROR("Requested mode for not defined action\n");
+			return;
+		}
+	} else {
+		/* XXX select a power state based on AC/DC, single/dualhead, etc. */
+		/* for now just select the first power state and switch between clock modes */
+		/* power state array is low to high, default is first (0) */
+		if (rdev->pm.active_crtc_count > 1) {
+			rdev->pm.requested_power_state_index = -1;
+			/* start at 1 as we don't want the default mode */
+			for (i = 1; i < rdev->pm.num_power_states; i++) {
+				if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
+					continue;
+				else if ((rdev->pm.power_state[i].type == POWER_STATE_TYPE_PERFORMANCE) ||
+					 (rdev->pm.power_state[i].type == POWER_STATE_TYPE_BATTERY)) {
+					rdev->pm.requested_power_state_index = i;
+					break;
+				}
+			}
+			/* if nothing selected, grab the default state. */
+			if (rdev->pm.requested_power_state_index == -1)
+				rdev->pm.requested_power_state_index = 0;
+		} else
+			rdev->pm.requested_power_state_index = 1;
+
+		switch (rdev->pm.dynpm_planned_action) {
+		case DYNPM_ACTION_MINIMUM:
+			rdev->pm.requested_clock_mode_index = 0;
+			rdev->pm.dynpm_can_downclock = false;
+			break;
+		case DYNPM_ACTION_DOWNCLOCK:
+			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
+				if (rdev->pm.current_clock_mode_index == 0) {
+					rdev->pm.requested_clock_mode_index = 0;
+					rdev->pm.dynpm_can_downclock = false;
+				} else
+					rdev->pm.requested_clock_mode_index =
+						rdev->pm.current_clock_mode_index - 1;
+			} else {
+				rdev->pm.requested_clock_mode_index = 0;
+				rdev->pm.dynpm_can_downclock = false;
+			}
+			/* don't use the power state if crtcs are active and no display flag is set */
+			if ((rdev->pm.active_crtc_count > 0) &&
+			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
+			     clock_info[rdev->pm.requested_clock_mode_index].flags &
+			     RADEON_PM_MODE_NO_DISPLAY)) {
+				rdev->pm.requested_clock_mode_index++;
+			}
+			break;
+		case DYNPM_ACTION_UPCLOCK:
+			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
+				if (rdev->pm.current_clock_mode_index ==
+				    (rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1)) {
+					rdev->pm.requested_clock_mode_index = rdev->pm.current_clock_mode_index;
+					rdev->pm.dynpm_can_upclock = false;
+				} else
+					rdev->pm.requested_clock_mode_index =
+						rdev->pm.current_clock_mode_index + 1;
+			} else {
+				rdev->pm.requested_clock_mode_index =
+					rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1;
+				rdev->pm.dynpm_can_upclock = false;
+			}
+			break;
+		case DYNPM_ACTION_DEFAULT:
+			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
+			rdev->pm.requested_clock_mode_index = 0;
+			rdev->pm.dynpm_can_upclock = false;
+			break;
+		case DYNPM_ACTION_NONE:
+		default:
+			DRM_ERROR("Requested mode for not defined action\n");
+			return;
+		}
+	}
+
+	DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n",
+		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
+		  clock_info[rdev->pm.requested_clock_mode_index].sclk,
+		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
+		  clock_info[rdev->pm.requested_clock_mode_index].mclk,
+		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
+		  pcie_lanes);
+}
+
+void rs780_pm_init_profile(struct radeon_device *rdev)
+{
+	if (rdev->pm.num_power_states == 2) {
+		/* default */
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
+		/* low sh */
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+		/* mid sh */
+		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
+		/* high sh */
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
+		/* low mh */
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+		/* mid mh */
+		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
+		/* high mh */
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 1;
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
+	} else if (rdev->pm.num_power_states == 3) {
+		/* default */
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
+		/* low sh */
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+		/* mid sh */
+		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
+		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
+		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
+		/* high sh */
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 2;
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
+		/* low mh */
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 1;
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 1;
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+		/* mid mh */
+		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 1;
+		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 1;
+		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
+		/* high mh */
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 1;
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
+	} else {
+		/* default */
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
+		/* low sh */
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 2;
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 2;
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+		/* mid sh */
+		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 2;
+		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 2;
+		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
+		/* high sh */
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 2;
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 3;
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
+		/* low mh */
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+		/* mid mh */
+		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
+		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
+		/* high mh */
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 3;
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
+	}
+}
+
+void r600_pm_init_profile(struct radeon_device *rdev)
+{
+	int idx;
+
+	if (rdev->family == CHIP_R600) {
+		/* XXX */
+		/* default */
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
+		/* low sh */
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+		/* mid sh */
+		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
+		/* high sh */
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
+		/* low mh */
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+		/* mid mh */
+		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
+		/* high mh */
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
+	} else {
+		if (rdev->pm.num_power_states < 4) {
+			/* default */
+			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
+			/* low sh */
+			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
+			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
+			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+			/* mid sh */
+			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
+			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
+			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
+			/* high sh */
+			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
+			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
+			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
+			/* low mh */
+			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
+			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 2;
+			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+			/* low mh */
+			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
+			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 2;
+			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
+			/* high mh */
+			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
+			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
+			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
+		} else {
+			/* default */
+			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
+			/* low sh */
+			if (rdev->flags & RADEON_IS_MOBILITY)
+				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
+			else
+				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
+			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
+			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+			/* mid sh */
+			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
+			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
+			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
+			/* high sh */
+			idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
+			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
+			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
+			/* low mh */
+			if (rdev->flags & RADEON_IS_MOBILITY)
+				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
+			else
+				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
+			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
+			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
+			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+			/* mid mh */
+			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
+			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
+			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
+			/* high mh */
+			idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
+			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
+			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
+			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
+		}
+	}
+}
+
+void r600_pm_misc(struct radeon_device *rdev)
+{
+	int req_ps_idx = rdev->pm.requested_power_state_index;
+	int req_cm_idx = rdev->pm.requested_clock_mode_index;
+	struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
+	struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
+
+	if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
+		/* 0xff01 is a flag rather then an actual voltage */
+		if (voltage->voltage == 0xff01)
+			return;
+		if (voltage->voltage != rdev->pm.current_vddc) {
+			radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
+			rdev->pm.current_vddc = voltage->voltage;
+			DRM_DEBUG_DRIVER("Setting: v: %d\n", voltage->voltage);
+		}
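To make the multi-head DOWNCLOCK scan in the added r600_pm_get_dynpm_state() concrete: with states ordered low-to-high and the lowest one flagged single-display-only, the loop skips the flagged entry and takes the first usable index at or below the current state. A minimal sketch of just that scan (the flag value and state layout are invented for the example):

    #include <stdio.h>

    #define SINGLE_DISPLAY_ONLY 0x1

    /* Mirrors the multi-CRTC DOWNCLOCK walk: scan the low-to-high state
     * array, skip single-display-only states, and stop at the first
     * index not above the current one. */
    static int pick_downclock(const unsigned *flags, int n, int current)
    {
            for (int i = 0; i < n; i++) {
                    if (flags[i] & SINGLE_DISPLAY_ONLY)
                            continue;
                    return (i >= current) ? current : i;
            }
            return current;
    }

    int main(void)
    {
            unsigned flags[3] = { SINGLE_DISPLAY_ONLY, 0, 0 };
            /* current = 2 (high): index 0 is skipped, index 1 wins */
            printf("requested index: %d\n", pick_downclock(flags, 3, 2));
            return 0;
    }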
Line 481... Line 966...
 		return -EINVAL;
 	}
 	r = radeon_gart_table_vram_pin(rdev);
 	if (r)
 		return r;
-	radeon_gart_restore(rdev);
 
 	/* Setup L2 cache */
 	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
 				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
Line 617... Line 1101...
 	return -1;
 }
 
 uint32_t rs780_mc_rreg(struct radeon_device *rdev, uint32_t reg)
 {
+	unsigned long flags;
 	uint32_t r;
 
+	spin_lock_irqsave(&rdev->mc_idx_lock, flags);
 	WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg));
 	r = RREG32(R_0028FC_MC_DATA);
 	WREG32(R_0028F8_MC_INDEX, ~C_0028F8_MC_IND_ADDR);
+	spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
 	return r;
 }
 
 void rs780_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
 {
+	unsigned long flags;
+
+	spin_lock_irqsave(&rdev->mc_idx_lock, flags);
 	WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg) |
 		S_0028F8_MC_IND_WR_EN(1));
 	WREG32(R_0028FC_MC_DATA, v);
 	WREG32(R_0028F8_MC_INDEX, 0x7F);
+	spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
 }
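The new mc_idx_lock closes a classic race on index/data register pairs: two unsynchronized callers can interleave their MC_INDEX writes and then read each other's MC_DATA. A user-space sketch of the same pattern, with a pthread mutex standing in for spin_lock_irqsave() and plain variables standing in for the registers:

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Fake index/data register pair, as on the RS780 MC block. */
    static uint32_t mc_index, mc_data[256];
    static pthread_mutex_t mc_idx_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Mirrors rs780_mc_rreg(): select the register, read it, deselect,
     * all under one lock so concurrent accessors cannot interleave. */
    static uint32_t mc_rreg(uint32_t reg)
    {
            uint32_t r;

            pthread_mutex_lock(&mc_idx_lock);
            mc_index = reg;        /* WREG32(MC_INDEX, addr) */
            r = mc_data[mc_index]; /* RREG32(MC_DATA)        */
            mc_index = 0;          /* deselect               */
            pthread_mutex_unlock(&mc_idx_lock);
            return r;
    }

    int main(void)
    {
            mc_data[42] = 0xdeadbeef;
            printf("reg 42 = 0x%08x\n", mc_rreg(42));
            return 0;
    }

The kernel variant uses the irqsave form because the same registers may be touched from interrupt context.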
Line 845... Line 1336...
 	int r;
 
 	if (rdev->vram_scratch.robj == NULL) {
 		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
 				     PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
-				     NULL, &rdev->vram_scratch.robj);
+				     0, NULL, &rdev->vram_scratch.robj);
 		if (r) {
 			return r;
 		}
Line 946... Line 1437...
 	}
 
 	return true;
 }
 
-static u32 r600_gpu_check_soft_reset(struct radeon_device *rdev)
+u32 r600_gpu_check_soft_reset(struct radeon_device *rdev)
 {
 	u32 reset_mask = 0;
Line 1151... Line 1642...
 	udelay(50);
 
 	r600_print_gpu_status_regs(rdev);
+}
+
+static void r600_gpu_pci_config_reset(struct radeon_device *rdev)
+{
+	struct rv515_mc_save save;
+	u32 tmp, i;
+
+	dev_info(rdev->dev, "GPU pci config reset\n");
+
+	/* disable dpm? */
+
+	/* Disable CP parsing/prefetching */
+	if (rdev->family >= CHIP_RV770)
+		WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1) | S_0086D8_CP_PFP_HALT(1));
+	else
+		WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
+
+	/* disable the RLC */
+	WREG32(RLC_CNTL, 0);
+
+	/* Disable DMA */
+	tmp = RREG32(DMA_RB_CNTL);
+	tmp &= ~DMA_RB_ENABLE;
+	WREG32(DMA_RB_CNTL, tmp);
+
+	mdelay(50);
+
+	/* set mclk/sclk to bypass */
+	if (rdev->family >= CHIP_RV770)
+		rv770_set_clk_bypass_mode(rdev);
+	/* disable BM */
+	pci_clear_master(rdev->pdev);
+	/* disable mem access */
+	rv515_mc_stop(rdev, &save);
+	if (r600_mc_wait_for_idle(rdev)) {
+		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
+	}
+
+	/* BIF reset workaround.  Not sure if this is needed on 6xx */
+	tmp = RREG32(BUS_CNTL);
+	tmp |= VGA_COHE_SPEC_TIMER_DIS;
+	WREG32(BUS_CNTL, tmp);
+
+	tmp = RREG32(BIF_SCRATCH0);
+
+	/* reset */
+	radeon_pci_config_reset(rdev);
+	mdelay(1);
+
+	/* BIF reset workaround.  Not sure if this is needed on 6xx */
+	tmp = SOFT_RESET_BIF;
+	WREG32(SRBM_SOFT_RESET, tmp);
+	mdelay(1);
+	WREG32(SRBM_SOFT_RESET, 0);
+
+	/* wait for asic to come out of reset */
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		if (RREG32(CONFIG_MEMSIZE) != 0xffffffff)
+			break;
+		udelay(1);
+	}
 }
 
 int r600_asic_reset(struct radeon_device *rdev)
 {
 	u32 reset_mask;
 
 	reset_mask = r600_gpu_check_soft_reset(rdev);
 
 	if (reset_mask)
 		r600_set_bios_scratch_engine_hung(rdev, true);
 
+	/* try soft reset */
 	r600_gpu_soft_reset(rdev, reset_mask);
+
+	reset_mask = r600_gpu_check_soft_reset(rdev);
+
+	/* try pci config reset */
+	if (reset_mask && radeon_hard_reset)
+		r600_gpu_pci_config_reset(rdev);
Line 1186... Line 1745...
 	u32 reset_mask = r600_gpu_check_soft_reset(rdev);
 
 	if (!(reset_mask & (RADEON_RESET_GFX |
 			    RADEON_RESET_COMPUTE |
 			    RADEON_RESET_CP))) {
-		radeon_ring_lockup_update(ring);
+		radeon_ring_lockup_update(rdev, ring);
 		return false;
 	}
-	/* force CP activities */
-	radeon_ring_force_activity(rdev, ring);
-	return radeon_ring_test_lockup(rdev, ring);
-}
-
-/**
- * r600_dma_is_lockup - Check if the DMA engine is locked up
- *
- * @rdev: radeon_device pointer
- * @ring: radeon_ring structure holding ring information
- *
- * Check if the async DMA engine is locked up.
- * Returns true if the engine appears to be locked up, false if not.
- */
-bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
-{
-	u32 reset_mask = r600_gpu_check_soft_reset(rdev);
-
-	if (!(reset_mask & RADEON_RESET_DMA)) {
-		radeon_ring_lockup_update(ring);
-		return false;
-	}
-	/* force ring activities */
-	radeon_ring_force_activity(rdev, ring);
 	return radeon_ring_test_lockup(rdev, ring);
 }
 
Line 1275... Line 1810...
 
 static void r600_gpu_init(struct radeon_device *rdev)
 {
 	u32 tiling_config;
 	u32 ramcfg;
-	u32 cc_rb_backend_disable;
 	u32 cc_gc_shader_pipe_config;
 	u32 tmp;
 	int i, j;
 	u32 sq_config;
Line 1402... Line 1936...
 		tiling_config |= ROW_TILING(tmp);
 		tiling_config |= SAMPLE_SPLIT(tmp);
 	}
 	tiling_config |= BANK_SWAPS(1);
-
-	cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
-	tmp = R6XX_MAX_BACKENDS -
-		r600_count_pipe_bits((cc_rb_backend_disable >> 16) & R6XX_MAX_BACKENDS_MASK);
-	if (tmp < rdev->config.r600.max_backends) {
-		rdev->config.r600.max_backends = tmp;
-	}
 
 	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0x00ffff00;
-	tmp = R6XX_MAX_PIPES -
-		r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R6XX_MAX_PIPES_MASK);
-	if (tmp < rdev->config.r600.max_pipes) {
-		rdev->config.r600.max_pipes = tmp;
-	}
-	tmp = R6XX_MAX_SIMDS -
+	tmp = rdev->config.r600.max_simds -
 		r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R6XX_MAX_SIMDS_MASK);
-	if (tmp < rdev->config.r600.max_simds) {
-		rdev->config.r600.max_simds = tmp;
-	}
+	rdev->config.r600.active_simds = tmp;
 
 	disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R6XX_MAX_BACKENDS_MASK;
+	tmp = 0;
+	for (i = 0; i < rdev->config.r600.max_backends; i++)
+		tmp |= (1 << i);
+	/* if all the backends are disabled, fix it up here */
+	if ((disabled_rb_mask & tmp) == tmp) {
+		for (i = 0; i < rdev->config.r600.max_backends; i++)
+			disabled_rb_mask &= ~(1 << i);
+	}
 	tmp = (tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
 	tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.r600.max_backends,
 					R6XX_MAX_BACKENDS, disabled_rb_mask);
Line 1686... Line 2214...
 /*
  * Indirect registers accessor
  */
 u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
 {
+	unsigned long flags;
 	u32 r;
 
+	spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
 	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
 	(void)RREG32(PCIE_PORT_INDEX);
 	r = RREG32(PCIE_PORT_DATA);
+	spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
 	return r;
 }
 
 void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
 {
+	unsigned long flags;
+
+	spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
 	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
 	(void)RREG32(PCIE_PORT_INDEX);
 	WREG32(PCIE_PORT_DATA, (v));
 	(void)RREG32(PCIE_PORT_DATA);
+	spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
 }
 
 /*
  * CP & Ring
  */
 void r600_cp_stop(struct radeon_device *rdev)
 {
+	if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
 	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
 	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
 	WREG32(SCRATCH_UMSK, 0);
 	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
 }
 
 int r600_init_microcode(struct radeon_device *rdev)
 {
-	struct platform_device *pdev;
 	const char *chip_name;
 	const char *rlc_chip_name;
-	size_t pfp_req_size, me_req_size, rlc_req_size;
+	const char *smc_chip_name = "RV770";
+	size_t pfp_req_size, me_req_size, rlc_req_size, smc_req_size = 0;
 	char fw_name[30];
 	int err;
 
 	DRM_DEBUG("\n");
-
-	pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
-	err = IS_ERR(pdev);
-	if (err) {
-		printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
-		return -EINVAL;
-	}
Line 1764... Line 2293...
 		rlc_chip_name = "R600";
 		break;
 	case CHIP_RV770:
 		chip_name = "RV770";
 		rlc_chip_name = "R700";
+		smc_chip_name = "RV770";
+		smc_req_size = ALIGN(RV770_SMC_UCODE_SIZE, 4);
 		break;
 	case CHIP_RV730:
-	case CHIP_RV740:
 		chip_name = "RV730";
 		rlc_chip_name = "R700";
+		smc_chip_name = "RV730";
+		smc_req_size = ALIGN(RV730_SMC_UCODE_SIZE, 4);
 		break;
 	case CHIP_RV710:
 		chip_name = "RV710";
 		rlc_chip_name = "R700";
+		smc_chip_name = "RV710";
+		smc_req_size = ALIGN(RV710_SMC_UCODE_SIZE, 4);
+		break;
+	case CHIP_RV740:
+		chip_name = "RV730";
+		rlc_chip_name = "R700";
+		smc_chip_name = "RV740";
+		smc_req_size = ALIGN(RV740_SMC_UCODE_SIZE, 4);
 		break;
 	case CHIP_CEDAR:
 		chip_name = "CEDAR";
 		rlc_chip_name = "CEDAR";
+		smc_chip_name = "CEDAR";
+		smc_req_size = ALIGN(CEDAR_SMC_UCODE_SIZE, 4);
 		break;
 	case CHIP_REDWOOD:
 		chip_name = "REDWOOD";
 		rlc_chip_name = "REDWOOD";
+		smc_chip_name = "REDWOOD";
+		smc_req_size = ALIGN(REDWOOD_SMC_UCODE_SIZE, 4);
 		break;
 	case CHIP_JUNIPER:
 		chip_name = "JUNIPER";
 		rlc_chip_name = "JUNIPER";
+		smc_chip_name = "JUNIPER";
+		smc_req_size = ALIGN(JUNIPER_SMC_UCODE_SIZE, 4);
 		break;
 	case CHIP_CYPRESS:
 	case CHIP_HEMLOCK:
 		chip_name = "CYPRESS";
 		rlc_chip_name = "CYPRESS";
+		smc_chip_name = "CYPRESS";
+		smc_req_size = ALIGN(CYPRESS_SMC_UCODE_SIZE, 4);
 		break;
 	case CHIP_PALM:
 		chip_name = "PALM";
 		rlc_chip_name = "SUMO";
 		break;
Line 1815... Line 2363...
 	} else if (rdev->family >= CHIP_RV770) {
 		pfp_req_size = R700_PFP_UCODE_SIZE * 4;
 		me_req_size = R700_PM4_UCODE_SIZE * 4;
 		rlc_req_size = R700_RLC_UCODE_SIZE * 4;
 	} else {
-		pfp_req_size = PFP_UCODE_SIZE * 4;
-		me_req_size = PM4_UCODE_SIZE * 12;
-		rlc_req_size = RLC_UCODE_SIZE * 4;
+		pfp_req_size = R600_PFP_UCODE_SIZE * 4;
+		me_req_size = R600_PM4_UCODE_SIZE * 12;
+		rlc_req_size = R600_RLC_UCODE_SIZE * 4;
 	}
 
 	DRM_INFO("Loading %s Microcode\n", chip_name);
 
 	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
-	err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
+	err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
 	if (err)
 		goto out;
 	if (rdev->pfp_fw->size != pfp_req_size) {
Line 1835... Line 2383...
 		err = -EINVAL;
 		goto out;
 	}
 
 	snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
-	err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
+	err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
 	if (err)
 		goto out;
 	if (rdev->me_fw->size != me_req_size) {
 		printk(KERN_ERR
 		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
 		       rdev->me_fw->size, fw_name);
 		err = -EINVAL;
 	}
 
 	snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
-	err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
+	err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
 	if (err)
 		goto out;
 	if (rdev->rlc_fw->size != rlc_req_size) {
 		printk(KERN_ERR
 		       "r600_rlc: Bogus length %zu in firmware \"%s\"\n",
 		       rdev->rlc_fw->size, fw_name);
 		err = -EINVAL;
 	}
 
+	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_HEMLOCK)) {
+		snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", smc_chip_name);
+		err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
+		if (err) {
+			printk(KERN_ERR
+			       "smc: error loading firmware \"%s\"\n",
+			       fw_name);
+			release_firmware(rdev->smc_fw);
+			rdev->smc_fw = NULL;
+			err = 0;
+		} else if (rdev->smc_fw->size != smc_req_size) {
+			printk(KERN_ERR
+			       "smc: Bogus length %zu in firmware \"%s\"\n",
+			       rdev->smc_fw->size, fw_name);
+			err = -EINVAL;
+		}
+	}
 
 out:
-	platform_device_unregister(pdev);
-
 	if (err) {
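Note the asymmetry the new SMC block introduces: a missing PFP/ME/RLC image aborts probing with an error, while a missing SMC image only logs, releases the handle and clears err, because the chip still works without power-management microcode. A user-space analogue of that optional-load pattern (the file name and size are hypothetical, for illustration only):

    #include <stdio.h>

    /* Required blobs abort on any failure; the optional SMC blob may be
     * absent (err stays 0) but, if present, must still have the
     * expected length. */
    static int load_blob(const char *name, long want_size, int optional)
    {
            FILE *f = fopen(name, "rb");
            long size;

            if (!f) {
                    if (optional) {
                            fprintf(stderr, "%s missing, continuing\n", name);
                            return 0;            /* like err = 0 for SMC */
                    }
                    return -1;                   /* like goto out */
            }
            fseek(f, 0, SEEK_END);
            size = ftell(f);
            fclose(f);
            return (size == want_size) ? 0 : -1; /* "Bogus length" check */
    }

    int main(void)
    {
            /* hypothetical name and size */
            return load_blob("RV770_smc.bin", 128, 1);
    }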
Line 1870... Line 2434...
 		rdev->pfp_fw = NULL;
 		release_firmware(rdev->me_fw);
 		rdev->me_fw = NULL;
 		release_firmware(rdev->rlc_fw);
 		rdev->rlc_fw = NULL;
+		release_firmware(rdev->smc_fw);
+		rdev->smc_fw = NULL;
 	}
 	return err;
 }
+
+u32 r600_gfx_get_rptr(struct radeon_device *rdev,
+		      struct radeon_ring *ring)
+{
+	u32 rptr;
+
+	if (rdev->wb.enabled)
+		rptr = rdev->wb.wb[ring->rptr_offs/4];
+	else
+		rptr = RREG32(R600_CP_RB_RPTR);
+
+	return rptr;
+}
+
+u32 r600_gfx_get_wptr(struct radeon_device *rdev,
+		      struct radeon_ring *ring)
+{
+	u32 wptr;
+
+	wptr = RREG32(R600_CP_RB_WPTR);
+
+	return wptr;
+}
+
+void r600_gfx_set_wptr(struct radeon_device *rdev,
+		       struct radeon_ring *ring)
+{
+	WREG32(R600_CP_RB_WPTR, ring->wptr);
+	(void)RREG32(R600_CP_RB_WPTR);
+}
 
 static int r600_cp_load_microcode(struct radeon_device *rdev)
 {
 	const __be32 *fw_data;
Line 1900... Line 2496...
 
 	WREG32(CP_ME_RAM_WADDR, 0);
 
 	fw_data = (const __be32 *)rdev->me_fw->data;
 	WREG32(CP_ME_RAM_WADDR, 0);
-	for (i = 0; i < PM4_UCODE_SIZE * 3; i++)
+	for (i = 0; i < R600_PM4_UCODE_SIZE * 3; i++)
 		WREG32(CP_ME_RAM_DATA,
 		       be32_to_cpup(fw_data++));
 
 	fw_data = (const __be32 *)rdev->pfp_fw->data;
 	WREG32(CP_PFP_UCODE_ADDR, 0);
-	for (i = 0; i < PFP_UCODE_SIZE; i++)
+	for (i = 0; i < R600_PFP_UCODE_SIZE; i++)
 		WREG32(CP_PFP_UCODE_DATA,
Line 1939... Line 2535...
 		radeon_ring_write(ring, rdev->config.r600.max_hw_contexts - 1);
 	}
 	radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
 	radeon_ring_write(ring, 0);
 	radeon_ring_write(ring, 0);
-	radeon_ring_unlock_commit(rdev, ring);
+	radeon_ring_unlock_commit(rdev, ring, false);
 
 	cp_me = 0xff;
 	WREG32(R_0086D8_CP_ME_CNTL, cp_me);
 	return 0;
Line 1960... Line 2556...
 	RREG32(GRBM_SOFT_RESET);
 	mdelay(15);
 	WREG32(GRBM_SOFT_RESET, 0);
 
 	/* Set ring buffer size */
-	rb_bufsz = drm_order(ring->ring_size / 8);
-	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+	rb_bufsz = order_base_2(ring->ring_size / 8);
+	tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
 #ifdef __BIG_ENDIAN
 	tmp |= BUF_SWAP_32BIT;
 #endif
 	WREG32(CP_RB_CNTL, tmp);
1996
	WREG32(CP_RB_CNTL, tmp);
2592
	WREG32(CP_RB_CNTL, tmp);
Line 1997... Line 2593...
1997
 
2593
 
1998
	WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
2594
	WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
Line 1999... Line -...
1999
	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
-
 
2000
 
-
 
2001
	ring->rptr = RREG32(CP_RB_RPTR);
2595
	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
2002
 
2596
 
2003
	r600_cp_start(rdev);
2597
	r600_cp_start(rdev);
2004
	ring->ready = true;
2598
	ring->ready = true;
2005
	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
2599
	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
2006
	if (r) {
2600
	if (r) {
2007
		ring->ready = false;
2601
		ring->ready = false;
-
 
2602
		return r;
-
 
2603
	}
-
 
2604
 
-
 
2605
	if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
2008
		return r;
2606
		radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
2009
	}
2607
 
Line 2010... Line 2608...
2010
	return 0;
2608
	return 0;
2011
}
2609
}
2012
 
2610
 
2013
void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size)
2611
void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size)
Line 2014... Line 2612...
 {
 	u32 rb_bufsz;
 	int r;
 
 	/* Align ring size */
-	rb_bufsz = drm_order(ring_size / 8);
+	rb_bufsz = order_base_2(ring_size / 8);
Line 2037... Line 2635...
 	radeon_ring_fini(rdev, ring);
 	radeon_scratch_free(rdev, ring->rptr_save_reg);
 }
 
-/*
- * DMA
- * Starting with R600, the GPU has an asynchronous
- * DMA engine.  The programming model is very similar
- * to the 3D engine (ring buffer, IBs, etc.), but the
- * DMA controller has its own packet format that is
- * different from the PM4 format used by the 3D engine.
- * It supports copying data, writing embedded data,
- * solid fills, and a number of other things.  It also
- * has support for tiling/detiling of buffers.
- */
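
For orientation: the async DMA packets used by the removed code below pack a 4-bit opcode, two flag bits and a 16-bit count into a single header dword. A sketch of that layout, mirroring the DMA_PACKET() macro in r600d.h (the inline function itself is illustrative only):

	/* r6xx async DMA packet header: opcode in bits 31:28, tiling flag in
	 * bit 23, semaphore-select flag in bit 22, dword count in bits 15:0. */
	static inline u32 r600_dma_packet_sketch(u32 cmd, u32 t, u32 s, u32 n)
	{
		return ((cmd & 0xF) << 28) | ((t & 0x1) << 23) |
		       ((s & 0x1) << 22) | (n & 0xFFFF);
	}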
-/**
- * r600_dma_stop - stop the async dma engine
- *
- * @rdev: radeon_device pointer
- *
- * Stop the async dma engine (r6xx-evergreen).
- */
-void r600_dma_stop(struct radeon_device *rdev)
-{
-	u32 rb_cntl = RREG32(DMA_RB_CNTL);
-
-	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
-
-	rb_cntl &= ~DMA_RB_ENABLE;
-	WREG32(DMA_RB_CNTL, rb_cntl);
-
-	rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
-}
-
-/**
- * r600_dma_resume - setup and start the async dma engine
- *
- * @rdev: radeon_device pointer
- *
- * Set up the DMA ring buffer and enable it. (r6xx-evergreen).
- * Returns 0 for success, error for failure.
- */
-int r600_dma_resume(struct radeon_device *rdev)
-{
-	struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
-	u32 rb_cntl, dma_cntl, ib_cntl;
-	u32 rb_bufsz;
-	int r;
-
-	/* Reset dma */
-	if (rdev->family >= CHIP_RV770)
-		WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA);
-	else
-		WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
-	RREG32(SRBM_SOFT_RESET);
-	udelay(50);
-	WREG32(SRBM_SOFT_RESET, 0);
-
-	WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL, 0);
-	WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);
-
-	/* Set ring buffer size in dwords */
-	rb_bufsz = drm_order(ring->ring_size / 4);
-	rb_cntl = rb_bufsz << 1;
-#ifdef __BIG_ENDIAN
-	rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
-#endif
-	WREG32(DMA_RB_CNTL, rb_cntl);
-
-	/* Initialize the ring buffer's read and write pointers */
-	WREG32(DMA_RB_RPTR, 0);
-	WREG32(DMA_RB_WPTR, 0);
-
-	/* set the wb address whether it's enabled or not */
-	WREG32(DMA_RB_RPTR_ADDR_HI,
-	       upper_32_bits(rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFF);
-	WREG32(DMA_RB_RPTR_ADDR_LO,
-	       ((rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFFFFFFFC));
-
-	if (rdev->wb.enabled)
-		rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
-
-	WREG32(DMA_RB_BASE, ring->gpu_addr >> 8);
-
-	/* enable DMA IBs */
-	ib_cntl = DMA_IB_ENABLE;
-#ifdef __BIG_ENDIAN
-	ib_cntl |= DMA_IB_SWAP_ENABLE;
-#endif
-	WREG32(DMA_IB_CNTL, ib_cntl);
-
-	dma_cntl = RREG32(DMA_CNTL);
-	dma_cntl &= ~CTXEMPTY_INT_ENABLE;
-	WREG32(DMA_CNTL, dma_cntl);
-
-	if (rdev->family >= CHIP_RV770)
-		WREG32(DMA_MODE, 1);
-
-	ring->wptr = 0;
-	WREG32(DMA_RB_WPTR, ring->wptr << 2);
-
-	ring->rptr = RREG32(DMA_RB_RPTR) >> 2;
-
-	WREG32(DMA_RB_CNTL, rb_cntl | DMA_RB_ENABLE);
-
-	ring->ready = true;
-
-	r = radeon_ring_test(rdev, R600_RING_TYPE_DMA_INDEX, ring);
-	if (r) {
-		ring->ready = false;
-		return r;
-	}
-
-	radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
-
-	return 0;
-}
-
-/**
- * r600_dma_fini - tear down the async dma engine
- *
- * @rdev: radeon_device pointer
- *
- * Stop the async dma engine and free the ring (r6xx-evergreen).
- */
-void r600_dma_fini(struct radeon_device *rdev)
-{
-	r600_dma_stop(rdev);
-	radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
-}
-
-/*
- * UVD
- */
-int r600_uvd_rbc_start(struct radeon_device *rdev)
-{
-	struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
-	uint64_t rptr_addr;
-	uint32_t rb_bufsz, tmp;
-	int r;
-
-	rptr_addr = rdev->wb.gpu_addr + R600_WB_UVD_RPTR_OFFSET;
-
-	if (upper_32_bits(rptr_addr) != upper_32_bits(ring->gpu_addr)) {
-		DRM_ERROR("UVD ring and rptr not in the same 4GB segment!\n");
-		return -EINVAL;
-	}
-
-	/* force RBC into idle state */
-	WREG32(UVD_RBC_RB_CNTL, 0x11010101);
-
-	/* Set the write pointer delay */
-	WREG32(UVD_RBC_RB_WPTR_CNTL, 0);
-
-	/* set the wb address */
-	WREG32(UVD_RBC_RB_RPTR_ADDR, rptr_addr >> 2);
-
-	/* program the 4GB memory segment for rptr and ring buffer */
-	WREG32(UVD_LMI_EXT40_ADDR, upper_32_bits(rptr_addr) |
-				   (0x7 << 16) | (0x1 << 31));
-
-	/* Initialize the ring buffer's read and write pointers */
-	WREG32(UVD_RBC_RB_RPTR, 0x0);
-
-	ring->wptr = ring->rptr = RREG32(UVD_RBC_RB_RPTR);
-	WREG32(UVD_RBC_RB_WPTR, ring->wptr);
-
-	/* set the ring address */
-	WREG32(UVD_RBC_RB_BASE, ring->gpu_addr);
-
-	/* Set ring buffer size */
-	rb_bufsz = drm_order(ring->ring_size);
-	rb_bufsz = (0x1 << 8) | rb_bufsz;
-	WREG32(UVD_RBC_RB_CNTL, rb_bufsz);
-
-	ring->ready = true;
-	r = radeon_ring_test(rdev, R600_RING_TYPE_UVD_INDEX, ring);
-	if (r) {
-		ring->ready = false;
-		return r;
-	}
-
-	r = radeon_ring_lock(rdev, ring, 10);
-	if (r) {
-		DRM_ERROR("radeon: ring failed to lock UVD ring (%d).\n", r);
-		return r;
-	}
-
-	tmp = PACKET0(UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
-	radeon_ring_write(ring, tmp);
-	radeon_ring_write(ring, 0xFFFFF);
-
-	tmp = PACKET0(UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
-	radeon_ring_write(ring, tmp);
-	radeon_ring_write(ring, 0xFFFFF);
-
-	tmp = PACKET0(UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
-	radeon_ring_write(ring, tmp);
-	radeon_ring_write(ring, 0xFFFFF);
-
-	/* Clear timeout status bits */
-	radeon_ring_write(ring, PACKET0(UVD_SEMA_TIMEOUT_STATUS, 0));
-	radeon_ring_write(ring, 0x8);
-
-	radeon_ring_write(ring, PACKET0(UVD_SEMA_CNTL, 0));
-	radeon_ring_write(ring, 3);
-
-	radeon_ring_unlock_commit(rdev, ring);
-
-	return 0;
-}
-
-void r600_uvd_rbc_stop(struct radeon_device *rdev)
-{
-	struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
-
-	/* force RBC into idle state */
-	WREG32(UVD_RBC_RB_CNTL, 0x11010101);
-	ring->ready = false;
-}
-
-int r600_uvd_init(struct radeon_device *rdev)
-{
-	int i, j, r;
-	/* disable byte swapping */
-	u32 lmi_swap_cntl = 0;
-	u32 mp_swap_cntl = 0;
-
-	/* raise clocks while booting up the VCPU */
-	radeon_set_uvd_clocks(rdev, 53300, 40000);
-
-	/* disable clock gating */
-	WREG32(UVD_CGC_GATE, 0);
-
-	/* disable interrupt */
-	WREG32_P(UVD_MASTINT_EN, 0, ~(1 << 1));
-
-	/* put LMI, VCPU, RBC etc... into reset */
-	WREG32(UVD_SOFT_RESET, LMI_SOFT_RESET | VCPU_SOFT_RESET |
-	       LBSI_SOFT_RESET | RBC_SOFT_RESET | CSM_SOFT_RESET |
-	       CXW_SOFT_RESET | TAP_SOFT_RESET | LMI_UMC_SOFT_RESET);
-	mdelay(5);
-
-	/* take UVD block out of reset */
-	WREG32_P(SRBM_SOFT_RESET, 0, ~SOFT_RESET_UVD);
-	mdelay(5);
-
-	/* initialize UVD memory controller */
-	WREG32(UVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
-			     (1 << 21) | (1 << 9) | (1 << 20));
-
-#ifdef __BIG_ENDIAN
-	/* swap (8 in 32) RB and IB */
-	lmi_swap_cntl = 0xa;
-	mp_swap_cntl = 0;
-#endif
-	WREG32(UVD_LMI_SWAP_CNTL, lmi_swap_cntl);
-	WREG32(UVD_MP_SWAP_CNTL, mp_swap_cntl);
-
-	WREG32(UVD_MPC_SET_MUXA0, 0x40c2040);
-	WREG32(UVD_MPC_SET_MUXA1, 0x0);
-	WREG32(UVD_MPC_SET_MUXB0, 0x40c2040);
-	WREG32(UVD_MPC_SET_MUXB1, 0x0);
-	WREG32(UVD_MPC_SET_ALU, 0);
-	WREG32(UVD_MPC_SET_MUX, 0x88);
-
-	/* Stall UMC */
-	WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
-	WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3));
-
-	/* take all subblocks out of reset, except VCPU */
-	WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET);
-	mdelay(5);
-
-	/* enable VCPU clock */
-	WREG32(UVD_VCPU_CNTL,  1 << 9);
-
-	/* enable UMC */
-	WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8));
-
-	/* boot up the VCPU */
-	WREG32(UVD_SOFT_RESET, 0);
-	mdelay(10);
-
-	WREG32_P(UVD_RB_ARB_CTRL, 0, ~(1 << 3));
-
-	for (i = 0; i < 10; ++i) {
-		uint32_t status;
-		for (j = 0; j < 100; ++j) {
-			status = RREG32(UVD_STATUS);
-			if (status & 2)
-				break;
-			mdelay(10);
-		}
-		r = 0;
-		if (status & 2)
-			break;
-
-		DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
-		WREG32_P(UVD_SOFT_RESET, VCPU_SOFT_RESET, ~VCPU_SOFT_RESET);
-		mdelay(10);
-		WREG32_P(UVD_SOFT_RESET, 0, ~VCPU_SOFT_RESET);
-		mdelay(10);
-		r = -1;
-	}
-
-	if (r) {
-		DRM_ERROR("UVD not responding, giving up!!!\n");
-		radeon_set_uvd_clocks(rdev, 0, 0);
-		return r;
-	}
-
-	/* enable interrupt */
-	WREG32_P(UVD_MASTINT_EN, 3<<1, ~(3 << 1));
-
-	r = r600_uvd_rbc_start(rdev);
-	if (!r)
-		DRM_INFO("UVD initialized successfully.\n");
-
-	/* lower clocks again */
-	radeon_set_uvd_clocks(rdev, 0, 0);
-
-	return r;
-}
-
 /*
  * GPU scratch register helper functions.
  */
 void r600_scratch_init(struct radeon_device *rdev)
 {
Line 2394... Line 2671...
 		return r;
 	}
 	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
 	radeon_ring_write(ring, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
 	radeon_ring_write(ring, 0xDEADBEEF);
-	radeon_ring_unlock_commit(rdev, ring);
+	radeon_ring_unlock_commit(rdev, ring, false);
 	for (i = 0; i < rdev->usec_timeout; i++) {
 		tmp = RREG32(scratch);
 		if (tmp == 0xDEADBEEF)
 			break;
 		DRM_UDELAY(1);
 	}
 	radeon_scratch_free(rdev, scratch);
 	return r;
 }
Line 2416... Line -...
-
-/**
- * r600_dma_ring_test - simple async dma engine test
- *
- * @rdev: radeon_device pointer
- * @ring: radeon_ring structure holding ring information
- *
- * Test the DMA engine by using it to write a value to memory. (r6xx-SI).
- * Returns 0 for success, error for failure.
- */
-int r600_dma_ring_test(struct radeon_device *rdev,
-		       struct radeon_ring *ring)
-{
-	unsigned i;
-	int r;
-	void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
-	u32 tmp;
-
-	if (!ptr) {
-		DRM_ERROR("invalid vram scratch pointer\n");
-		return -EINVAL;
-	}
-
-	tmp = 0xCAFEDEAD;
-	writel(tmp, ptr);
-
-	r = radeon_ring_lock(rdev, ring, 4);
-	if (r) {
-		DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
-		return r;
-	}
-	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
-	radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
-	radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff);
-	radeon_ring_write(ring, 0xDEADBEEF);
-	radeon_ring_unlock_commit(rdev, ring);
-
-	for (i = 0; i < rdev->usec_timeout; i++) {
-		tmp = readl(ptr);
-		if (tmp == 0xDEADBEEF)
-			break;
-		DRM_UDELAY(1);
-	}
-
-	if (i < rdev->usec_timeout) {
-		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
-	} else {
-		DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
-			  ring->idx, tmp);
-		r = -EINVAL;
-	}
-	return r;
-}
-
-int r600_uvd_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
-{
-	uint32_t tmp = 0;
-	unsigned i;
-	int r;
-
-	WREG32(UVD_CONTEXT_ID, 0xCAFEDEAD);
-	r = radeon_ring_lock(rdev, ring, 3);
-	if (r) {
-		DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n",
-			  ring->idx, r);
-		return r;
-	}
-	radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));
-	radeon_ring_write(ring, 0xDEADBEEF);
-	radeon_ring_unlock_commit(rdev, ring);
-	for (i = 0; i < rdev->usec_timeout; i++) {
-		tmp = RREG32(UVD_CONTEXT_ID);
-		if (tmp == 0xDEADBEEF)
-			break;
-		DRM_UDELAY(1);
-	}
-
-	if (i < rdev->usec_timeout) {
-		DRM_INFO("ring test on %d succeeded in %d usecs\n",
-			 ring->idx, i);
-	} else {
-		DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
-			  ring->idx, tmp);
-		r = -EINVAL;
-	}
-	return r;
-}
 
 /*
  * CP fences/semaphores
  */
 
 void r600_fence_ring_emit(struct radeon_device *rdev,
 			  struct radeon_fence *fence)
 {
 	struct radeon_ring *ring = &rdev->ring[fence->ring];
+	u32 cp_coher_cntl = PACKET3_TC_ACTION_ENA | PACKET3_VC_ACTION_ENA |
+		PACKET3_SH_ACTION_ENA;
+
+	if (rdev->family >= CHIP_RV770)
+		cp_coher_cntl |= PACKET3_FULL_CACHE_ENA;
 
 	if (rdev->wb.use_event) {
 		u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
 		/* flush read cache over gart */
 		radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
-		radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
-					PACKET3_VC_ACTION_ENA |
-					PACKET3_SH_ACTION_ENA);
+		radeon_ring_write(ring, cp_coher_cntl);
 		radeon_ring_write(ring, 0xFFFFFFFF);
 		radeon_ring_write(ring, 0);
 		radeon_ring_write(ring, 10); /* poll interval */
 		/* EVENT_WRITE_EOP - flush caches, send int */
 		radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
 		radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
-		radeon_ring_write(ring, addr & 0xffffffff);
+		radeon_ring_write(ring, lower_32_bits(addr));
 		radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
 		radeon_ring_write(ring, fence->seq);
 		radeon_ring_write(ring, 0);
 	} else {
 		/* flush read cache over gart */
 		radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
-		radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
-					PACKET3_VC_ACTION_ENA |
-					PACKET3_SH_ACTION_ENA);
+		radeon_ring_write(ring, cp_coher_cntl);
 		radeon_ring_write(ring, 0xFFFFFFFF);
 		radeon_ring_write(ring, 0);
Line 2551... Line 2741...
 		radeon_ring_write(ring, PACKET0(CP_INT_STATUS, 0));
 		radeon_ring_write(ring, RB_INT_STAT);
 	}
 }
Line 2555... Line -...
-void r600_uvd_fence_emit(struct radeon_device *rdev,
-			 struct radeon_fence *fence)
-{
-	struct radeon_ring *ring = &rdev->ring[fence->ring];
-	uint32_t addr = rdev->fence_drv[fence->ring].gpu_addr;
-
-	radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));
-	radeon_ring_write(ring, fence->seq);
-	radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0));
-	radeon_ring_write(ring, addr & 0xffffffff);
-	radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0));
-	radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
-	radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0));
-	radeon_ring_write(ring, 0);
-
-	radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0));
-	radeon_ring_write(ring, 0);
-	radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0));
-	radeon_ring_write(ring, 0);
-	radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0));
-	radeon_ring_write(ring, 2);
-	return;
-}
-
-void r600_semaphore_ring_emit(struct radeon_device *rdev,
-			      struct radeon_ring *ring,
-			      struct radeon_semaphore *semaphore,
-			      bool emit_wait)
-{
-	uint64_t addr = semaphore->gpu_addr;
-	unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
-
-	if (rdev->family < CHIP_CAYMAN)
-		sel |= PACKET3_SEM_WAIT_ON_SIGNAL;
-
-	radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
-	radeon_ring_write(ring, addr & 0xffffffff);
-	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
-}
-
-/*
- * DMA fences/semaphores
- */
-
-/**
- * r600_dma_fence_ring_emit - emit a fence on the DMA ring
- *
- * @rdev: radeon_device pointer
- * @fence: radeon fence object
- *
- * Add a DMA fence packet to the ring to write
- * the fence seq number and DMA trap packet to generate
- * an interrupt if needed (r6xx-r7xx).
- */
-void r600_dma_fence_ring_emit(struct radeon_device *rdev,
-			      struct radeon_fence *fence)
-{
-	struct radeon_ring *ring = &rdev->ring[fence->ring];
-	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
-
-	/* write the fence */
-	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0));
-	radeon_ring_write(ring, addr & 0xfffffffc);
-	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
-	radeon_ring_write(ring, lower_32_bits(fence->seq));
-	/* generate an interrupt */
-	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0));
-}
-
-/**
- * r600_dma_semaphore_ring_emit - emit a semaphore on the dma ring
- *
- * @rdev: radeon_device pointer
- * @ring: radeon_ring structure holding ring information
- * @semaphore: radeon semaphore object
- * @emit_wait: wait or signal semaphore
- *
- * Add a DMA semaphore packet to the ring to wait on or signal
- * other rings (r6xx-SI).
- */
-void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
-				  struct radeon_ring *ring,
-				  struct radeon_semaphore *semaphore,
-				  bool emit_wait)
-{
-	u64 addr = semaphore->gpu_addr;
-	u32 s = emit_wait ? 0 : 1;
-
-	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SEMAPHORE, 0, s, 0));
-	radeon_ring_write(ring, addr & 0xfffffffc);
-	radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
-}
-
-void r600_uvd_semaphore_emit(struct radeon_device *rdev,
-			     struct radeon_ring *ring,
-			     struct radeon_semaphore *semaphore,
-			     bool emit_wait)
-{
-	uint64_t addr = semaphore->gpu_addr;
-
-	radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
-	radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
-
-	radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
-	radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
-
-	radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
-	radeon_ring_write(ring, emit_wait ? 1 : 0);
-}
-
-int r600_copy_blit(struct radeon_device *rdev,
-		   uint64_t src_offset,
-		   uint64_t dst_offset,
-		   unsigned num_gpu_pages,
-		   struct radeon_fence **fence)
-{
-	struct radeon_semaphore *sem = NULL;
-	struct radeon_sa_bo *vb = NULL;
-	int r;
-
-	r = r600_blit_prepare_copy(rdev, num_gpu_pages, fence, &vb, &sem);
-	if (r) {
-		return r;
-	}
-	r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages, vb);
-	r600_blit_done_copy(rdev, fence, vb, sem);
-	return 0;
-}
 
+/**
+ * r600_semaphore_ring_emit - emit a semaphore on the CP ring
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon ring buffer object
+ * @semaphore: radeon semaphore object
+ * @emit_wait: Is this a semaphore wait?
+ *
+ * Emits a semaphore signal/wait packet to the CP ring and prevents the PFP
+ * from running ahead of semaphore waits.
+ */
+bool r600_semaphore_ring_emit(struct radeon_device *rdev,
+			      struct radeon_ring *ring,
+			      struct radeon_semaphore *semaphore,
+			      bool emit_wait)
+{
+	uint64_t addr = semaphore->gpu_addr;
+	unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
+
+	if (rdev->family < CHIP_CAYMAN)
+		sel |= PACKET3_SEM_WAIT_ON_SIGNAL;
+
+	radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
+	radeon_ring_write(ring, lower_32_bits(addr));
+	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
+
+	/* PFP_SYNC_ME packet only exists on 7xx+ */
+	if (emit_wait && (rdev->family >= CHIP_RV770)) {
+		/* Prevent the PFP from running ahead of the semaphore wait */
+		radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
2679
	}
2776
		radeon_ring_write(ring, 0x0);
2680
	r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages, vb);
2777
	}
2681
	r600_blit_done_copy(rdev, fence, vb, sem);
2778
 
2682
	return 0;
2779
	return true;
2683
}
2780
}
2684
 
2781
 
2685
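A hedged usage sketch of the new signal/wait pairing across two rings; ring_a, ring_b and sem below are placeholder names, not identifiers from this file:

	/* Hypothetical pairing: ring_b stalls until ring_a signals the
	 * semaphore.  On 7xx+ the wait side also emits PFP_SYNC_ME, as above. */
	r600_semaphore_ring_emit(rdev, ring_a, sem, false);	/* signal */
	r600_semaphore_ring_emit(rdev, ring_b, sem, true);	/* wait */
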
/**
2782
/**
2686
 * r600_copy_dma - copy pages using the DMA engine
2783
 * r600_copy_cpdma - copy pages using the CP DMA engine
2687
 *
2784
 *
2688
 * @rdev: radeon_device pointer
2785
 * @rdev: radeon_device pointer
2689
 * @src_offset: src GPU address
2786
 * @src_offset: src GPU address
2690
 * @dst_offset: dst GPU address
2787
 * @dst_offset: dst GPU address
2691
 * @num_gpu_pages: number of GPU pages to xfer
2788
 * @num_gpu_pages: number of GPU pages to xfer
2692
 * @fence: radeon fence object
2789
 * @fence: radeon fence object
2693
 *
2790
 *
2694
 * Copy GPU paging using the DMA engine (r6xx).
2791
 * Copy GPU paging using the CP DMA engine (r6xx+).
2695
 * Used by the radeon ttm implementation to move pages if
2792
 * Used by the radeon ttm implementation to move pages if
2696
 * registered as the asic copy callback.
2793
 * registered as the asic copy callback.
2697
 */
2794
 */
2698
int r600_copy_dma(struct radeon_device *rdev,
2795
int r600_copy_cpdma(struct radeon_device *rdev,
2699
		  uint64_t src_offset, uint64_t dst_offset,
2796
		  uint64_t src_offset, uint64_t dst_offset,
2700
		  unsigned num_gpu_pages,
2797
		  unsigned num_gpu_pages,
2701
		  struct radeon_fence **fence)
2798
		  struct radeon_fence **fence)
2702
{
2799
{
Line 2703... Line 2800...
2703
	struct radeon_semaphore *sem = NULL;
2800
	struct radeon_semaphore *sem = NULL;
2704
	int ring_index = rdev->asic->copy.dma_ring_index;
2801
	int ring_index = rdev->asic->copy.blit_ring_index;
2705
	struct radeon_ring *ring = &rdev->ring[ring_index];
2802
	struct radeon_ring *ring = &rdev->ring[ring_index];
2706
	u32 size_in_dw, cur_size_in_dw;
2803
	u32 size_in_bytes, cur_size_in_bytes, tmp;
2707
	int i, num_loops;
2804
	int i, num_loops;
Line 2708... Line 2805...
2708
	int r = 0;
2805
	int r = 0;
2709
 
2806
 
2710
	r = radeon_semaphore_create(rdev, &sem);
2807
	r = radeon_semaphore_create(rdev, &sem);
2711
	if (r) {
2808
	if (r) {
2712
		DRM_ERROR("radeon: moving bo (%d).\n", r);
2809
		DRM_ERROR("radeon: moving bo (%d).\n", r);
2713
		return r;
2810
		return r;
2714
	}
2811
	}
2715
 
2812
 
Line 2716... Line -...
2716
	size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
-
 
2717
	num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFE);
2813
	size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
2718
	r = radeon_ring_lock(rdev, ring, num_loops * 4 + 8);
-
 
2719
	if (r) {
-
 
2720
		DRM_ERROR("radeon: moving bo (%d).\n", r);
-
 
2721
		radeon_semaphore_free(rdev, &sem, NULL);
2814
	num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
2722
		return r;
-
 
Line -... Line 2815...
-
 
2815
	r = radeon_ring_lock(rdev, ring, num_loops * 6 + 24);
-
 
2816
	if (r) {
-
 
2817
		DRM_ERROR("radeon: moving bo (%d).\n", r);
2723
	}
2818
		radeon_semaphore_free(rdev, &sem, NULL);
2724
 
2819
		return r;
2725
	if (radeon_fence_need_sync(*fence, ring->idx)) {
2820
	}
2726
		radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
2821
 
2727
					    ring->idx);
2822
	radeon_semaphore_sync_to(sem, *fence);
-
 
2823
	radeon_semaphore_sync_rings(rdev, sem, ring->idx);
-
 
2824
 
-
 
2825
	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2728
		radeon_fence_note_sync(*fence, ring->idx);
2826
	radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
2729
	} else {
2827
	radeon_ring_write(ring, WAIT_3D_IDLE_bit);
-
 
2828
	for (i = 0; i < num_loops; i++) {
2730
		radeon_semaphore_free(rdev, &sem, NULL);
2829
		cur_size_in_bytes = size_in_bytes;
2731
	}
2830
		if (cur_size_in_bytes > 0x1fffff)
2732
 
2831
			cur_size_in_bytes = 0x1fffff;
2733
	for (i = 0; i < num_loops; i++) {
2832
		size_in_bytes -= cur_size_in_bytes;
2734
		cur_size_in_dw = size_in_dw;
2833
		tmp = upper_32_bits(src_offset) & 0xff;
2735
		if (cur_size_in_dw > 0xFFFE)
2834
		if (size_in_bytes == 0)
-
 
2835
			tmp |= PACKET3_CP_DMA_CP_SYNC;
-
 
2836
		radeon_ring_write(ring, PACKET3(PACKET3_CP_DMA, 4));
-
 
2837
		radeon_ring_write(ring, lower_32_bits(src_offset));
Line 2736... Line 2838...
2736
			cur_size_in_dw = 0xFFFE;
2838
		radeon_ring_write(ring, tmp);
2737
		size_in_dw -= cur_size_in_dw;
2839
		radeon_ring_write(ring, lower_32_bits(dst_offset));
2738
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
2840
		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
-
 
2841
		radeon_ring_write(ring, cur_size_in_bytes);
2739
		radeon_ring_write(ring, dst_offset & 0xfffffffc);
2842
		src_offset += cur_size_in_bytes;
2740
		radeon_ring_write(ring, src_offset & 0xfffffffc);
2843
		dst_offset += cur_size_in_bytes;
Line 2741... Line 2844...
2741
		radeon_ring_write(ring, (((upper_32_bits(dst_offset) & 0xff) << 16) |
2844
	}
2742
					 (upper_32_bits(src_offset) & 0xff)));
2845
	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
Line 2743... Line 2846...
2743
		src_offset += cur_size_in_dw * 4;
2846
	radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
2744
		dst_offset += cur_size_in_dw * 4;
2847
	radeon_ring_write(ring, WAIT_CP_DMA_IDLE_bit);
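
The loop above caps each PACKET3_CP_DMA transfer at 0x1fffff bytes. A standalone sketch of the resulting packet count, assuming RADEON_GPU_PAGE_SHIFT == 12; the helper name is hypothetical:

	/* A page-aligned copy needs ceil(bytes / 0x1fffff) CP_DMA packets. */
	static unsigned cpdma_num_packets_sketch(unsigned num_gpu_pages)
	{
		unsigned bytes = num_gpu_pages << 12;
		return (bytes + 0x1fffff - 1) / 0x1fffff;	/* DIV_ROUND_UP */
	}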
Line 2775... Line 2878...
 	int r;
 
 	/* enable pcie gen2 link */
 	r600_pcie_gen2_enable(rdev);
 
-	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
-		r = r600_init_microcode(rdev);
-		if (r) {
-			DRM_ERROR("Failed to load firmware!\n");
-			return r;
-		}
-	}
-
+	/* scratch needs to be initialized before MC */
 	r = r600_vram_scratch_init(rdev);
 	if (r)
 		return r;
 
 	r600_mc_program(rdev);
 	if (rdev->flags & RADEON_IS_AGP) {
 		r600_agp_enable(rdev);
 	} else {
 		r = r600_pcie_gart_enable(rdev);
 		if (r)
 			return r;
 	}
 	r600_gpu_init(rdev);
-	r = r600_blit_init(rdev);
-	if (r) {
-//		r600_blit_fini(rdev);
-		rdev->asic->copy.copy = NULL;
-		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
-	}
 
Line 2814... Line 2905...
 	if (r) {
 		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
 		return r;
 	}
-
-	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
-	if (r) {
-		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
-		return r;
-	}
 
 	/* Enable IRQ */
 	if (!rdev->irq.installed) {
 		r = radeon_irq_kms_init(rdev);
 		if (r)
2828
		if (r)
2913
		if (r)
Line 2837... Line 2922...
2837
	}
2922
	}
2838
	r600_irq_set(rdev);
2923
	r600_irq_set(rdev);
Line 2839... Line 2924...
2839
 
2924
 
2840
	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
2925
	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
2841
	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
-
 
2842
			     R600_CP_RB_RPTR, R600_CP_RB_WPTR,
2926
	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
2843
			     0, 0xfffff, RADEON_CP_PACKET2);
-
 
2844
	if (r)
-
 
2845
		return r;
-
 
2846
 
-
 
2847
	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
-
 
2848
	r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
-
 
2849
			     DMA_RB_RPTR, DMA_RB_WPTR,
-
 
2850
			     2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
2927
			     RADEON_CP_PACKET2);
2851
	if (r)
2928
	if (r)
Line 2852... Line 2929...
2852
		return r;
2929
		return r;
2853
 
2930
 
2854
	r = r600_cp_load_microcode(rdev);
2931
	r = r600_cp_load_microcode(rdev);
2855
	if (r)
2932
	if (r)
2856
		return r;
2933
		return r;
2857
	r = r600_cp_resume(rdev);
2934
	r = r600_cp_resume(rdev);
Line 2858... Line -...
2858
	if (r)
-
 
2859
		return r;
-
 
2860
 
-
 
2861
	r = r600_dma_resume(rdev);
-
 
2862
	if (r)
2935
	if (r)
2863
		return r;
2936
		return r;
2864
 
2937
 
2865
	r = radeon_ib_pool_init(rdev);
2938
	r = radeon_ib_pool_init(rdev);
2866
	if (r) {
2939
	if (r) {
-
 
2940
		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
-
 
2941
		return r;
2867
		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
2942
	}
2868
		return r;
2943
 
Line 2869... Line 2944...
2869
	}
2944
 
2870
	return 0;
2945
	return 0;
Line 2944... Line 3019...
 	/* Memory manager */
 	r = radeon_bo_init(rdev);
 	if (r)
 		return r;
 
+	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+		r = r600_init_microcode(rdev);
+		if (r) {
+			DRM_ERROR("Failed to load firmware!\n");
+			return r;
+		}
+	}
+
+	/* Initialize power management */
+	radeon_pm_init(rdev);
 
 	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
 	r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
-
-	rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
-	r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
 
 	rdev->ih.ring_obj = NULL;
 	r600_ih_ring_init(rdev, 64 * 1024);
3001
			  (ib->gpu_addr & 0xFFFFFFFC));
3084
			  (ib->gpu_addr & 0xFFFFFFFC));
3002
	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
3085
	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
3003
	radeon_ring_write(ring, ib->length_dw);
3086
	radeon_ring_write(ring, ib->length_dw);
3004
}
3087
}
Line 3005... Line -...
3005
 
-
 
3006
void r600_uvd_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
-
 
3007
{
-
 
3008
	struct radeon_ring *ring = &rdev->ring[ib->ring];
-
 
3009
 
-
 
3010
	radeon_ring_write(ring, PACKET0(UVD_RBC_IB_BASE, 0));
-
 
3011
	radeon_ring_write(ring, ib->gpu_addr);
-
 
3012
	radeon_ring_write(ring, PACKET0(UVD_RBC_IB_SIZE, 0));
-
 
3013
	radeon_ring_write(ring, ib->length_dw);
-
 
3014
}
-
 
3015
 
3088
 
3016
int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
3089
int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
3017
{
3090
{
3018
	struct radeon_ib ib;
3091
	struct radeon_ib ib;
3019
	uint32_t scratch;
3092
	uint32_t scratch;
Line 3034... Line 3107...
3034
	}
3107
	}
3035
	ib.ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
3108
	ib.ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
3036
	ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
3109
	ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
3037
	ib.ptr[2] = 0xDEADBEEF;
3110
	ib.ptr[2] = 0xDEADBEEF;
3038
	ib.length_dw = 3;
3111
	ib.length_dw = 3;
3039
	r = radeon_ib_schedule(rdev, &ib, NULL);
3112
	r = radeon_ib_schedule(rdev, &ib, NULL, false);
3040
	if (r) {
3113
	if (r) {
3041
		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
3114
		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
3042
		goto free_ib;
3115
		goto free_ib;
3043
	}
3116
	}
3044
	r = radeon_fence_wait(ib.fence, false);
3117
	r = radeon_fence_wait(ib.fence, false);
Line 3064... Line 3137...
 free_scratch:
 	radeon_scratch_free(rdev, scratch);
 	return r;
 }
 
-/**
- * r600_dma_ib_test - test an IB on the DMA engine
- *
- * @rdev: radeon_device pointer
- * @ring: radeon_ring structure holding ring information
- *
- * Test a simple IB in the DMA ring (r6xx-SI).
- * Returns 0 on success, error on failure.
- */
-int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
-{
-	struct radeon_ib ib;
-	unsigned i;
-	int r;
-	void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
-	u32 tmp = 0;
-
-	if (!ptr) {
-		DRM_ERROR("invalid vram scratch pointer\n");
-		return -EINVAL;
-	}
-
-	tmp = 0xCAFEDEAD;
-	writel(tmp, ptr);
-
-	r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
-	if (r) {
-		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
-		return r;
-	}
-
-	ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1);
-	ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
-	ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff;
-	ib.ptr[3] = 0xDEADBEEF;
-	ib.length_dw = 4;
-
-	r = radeon_ib_schedule(rdev, &ib, NULL);
-	if (r) {
-		radeon_ib_free(rdev, &ib);
-		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
-		return r;
-	}
-	r = radeon_fence_wait(ib.fence, false);
-	if (r) {
-		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
-		return r;
-	}
-	for (i = 0; i < rdev->usec_timeout; i++) {
-		tmp = readl(ptr);
-		if (tmp == 0xDEADBEEF)
-			break;
-		DRM_UDELAY(1);
-	}
-	if (i < rdev->usec_timeout) {
-		DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
-	} else {
-		DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp);
-		r = -EINVAL;
-	}
-	radeon_ib_free(rdev, &ib);
-	return r;
-}
-
-int r600_uvd_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
-{
-	struct radeon_fence *fence = NULL;
-	int r;
-
-	r = radeon_set_uvd_clocks(rdev, 53300, 40000);
-	if (r) {
-		DRM_ERROR("radeon: failed to raise UVD clocks (%d).\n", r);
-		return r;
-	}
-
-//	r = radeon_uvd_get_create_msg(rdev, ring->idx, 1, NULL);
-	if (r) {
-		DRM_ERROR("radeon: failed to get create msg (%d).\n", r);
-		goto error;
-	}
-
-//	r = radeon_uvd_get_destroy_msg(rdev, ring->idx, 1, &fence);
-	if (r) {
-		DRM_ERROR("radeon: failed to get destroy ib (%d).\n", r);
-		goto error;
-	}
-
-	r = radeon_fence_wait(fence, false);
-	if (r) {
-		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
-		goto error;
-	}
-	DRM_INFO("ib test on ring %d succeeded\n",  ring->idx);
-error:
-	radeon_fence_unref(&fence);
-	radeon_set_uvd_clocks(rdev, 0, 0);
-	return r;
-}
-
-/**
- * r600_dma_ring_ib_execute - Schedule an IB on the DMA engine
- *
- * @rdev: radeon_device pointer
- * @ib: IB object to schedule
- *
- * Schedule an IB in the DMA ring (r6xx-r7xx).
- */
-void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
-{
-	struct radeon_ring *ring = &rdev->ring[ib->ring];
-
-	if (rdev->wb.enabled) {
-		u32 next_rptr = ring->wptr + 4;
-		while ((next_rptr & 7) != 5)
-			next_rptr++;
-		next_rptr += 3;
-		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
-		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
-		radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
-		radeon_ring_write(ring, next_rptr);
-	}
-
-	/* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
-	 * Pad as necessary with NOPs.
-	 */
-	while ((ring->wptr & 7) != 5)
-		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
-	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0));
-	radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
-	radeon_ring_write(ring, (ib->length_dw << 16) | (upper_32_bits(ib->gpu_addr) & 0xFF));
-}
 
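The removed r600_dma_ring_ib_execute() above depends on the 3-dword INDIRECT_BUFFER packet ending on an 8-dword boundary, so the packet must start at wptr % 8 == 5. A standalone sketch of that padding arithmetic; the helper is illustrative only:

	/* Number of NOP dwords to emit before the 3-dword IB packet so
	 * that the packet ends on an 8-dword boundary. */
	static unsigned dma_ib_pad_dwords_sketch(unsigned wptr)
	{
		unsigned pad = 0;
		while (((wptr + pad) & 7) != 5)
			pad++;
		return pad;
	}
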
 /*
  * Interrupts
  *
  * Interrupts use a ring buffer on r6xx/r7xx hardware.  It works pretty
Line 3213... Line 3153...
 void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
 {
 	u32 rb_bufsz;
 
 	/* Align ring size */
-	rb_bufsz = drm_order(ring_size / 4);
+	rb_bufsz = order_base_2(ring_size / 4);
 	ring_size = (1 << rb_bufsz) * 4;
 	rdev->ih.ring_size = ring_size;
 	rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
 	rdev->ih.rptr = 0;
Line 3228... Line 3168...
 
 	/* Allocate ring buffer */
 	if (rdev->ih.ring_obj == NULL) {
 		r = radeon_bo_create(rdev, rdev->ih.ring_size,
 				     PAGE_SIZE, true,
-				     RADEON_GEM_DOMAIN_GTT,
+				     RADEON_GEM_DOMAIN_GTT, 0,
 				     NULL, &rdev->ih.ring_obj);
 		if (r) {
 			DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
 			return r;
Line 3293... Line 3233...
 static void r600_rlc_start(struct radeon_device *rdev)
 {
 	WREG32(RLC_CNTL, RLC_ENABLE);
 }
 
-static int r600_rlc_init(struct radeon_device *rdev)
+static int r600_rlc_resume(struct radeon_device *rdev)
 {
 	u32 i;
 	const __be32 *fw_data;
Line 3305... Line 3245...
 
 	r600_rlc_stop(rdev);
 
 	WREG32(RLC_HB_CNTL, 0);
 
-	if (rdev->family == CHIP_ARUBA) {
-		WREG32(TN_RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
-		WREG32(TN_RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
-	}
-	if (rdev->family <= CHIP_CAYMAN) {
 	WREG32(RLC_HB_BASE, 0);
 	WREG32(RLC_HB_RPTR, 0);
 	WREG32(RLC_HB_WPTR, 0);
-	}
-	if (rdev->family <= CHIP_CAICOS) {
 		WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
 		WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
-	}
 	WREG32(RLC_MC_CNTL, 0);
 	WREG32(RLC_UCODE_CNTL, 0);
 
 	fw_data = (const __be32 *)rdev->rlc_fw->data;
-	if (rdev->family >= CHIP_ARUBA) {
-		for (i = 0; i < ARUBA_RLC_UCODE_SIZE; i++) {
-			WREG32(RLC_UCODE_ADDR, i);
-			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
-		}
-	} else if (rdev->family >= CHIP_CAYMAN) {
-		for (i = 0; i < CAYMAN_RLC_UCODE_SIZE; i++) {
-			WREG32(RLC_UCODE_ADDR, i);
-			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
-		}
-	} else if (rdev->family >= CHIP_CEDAR) {
-		for (i = 0; i < EVERGREEN_RLC_UCODE_SIZE; i++) {
-			WREG32(RLC_UCODE_ADDR, i);
-			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
-		}
-	} else if (rdev->family >= CHIP_RV770) {
+	if (rdev->family >= CHIP_RV770) {
 		for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
 			WREG32(RLC_UCODE_ADDR, i);
 			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
 		}
 	} else {
-		for (i = 0; i < RLC_UCODE_SIZE; i++) {
+		for (i = 0; i < R600_RLC_UCODE_SIZE; i++) {
 			WREG32(RLC_UCODE_ADDR, i);
Line 3451... Line 3368...
 
 	/* disable irqs */
 	r600_disable_interrupts(rdev);
 
 	/* init rlc */
-	ret = r600_rlc_init(rdev);
+	if (rdev->family >= CHIP_CEDAR)
+		ret = evergreen_rlc_resume(rdev);
+	else
+		ret = r600_rlc_resume(rdev);
 	if (ret) {
 		r600_ih_ring_fini(rdev);
Line 3470... Line 3390...
 	/* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
 	interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
 	WREG32(INTERRUPT_CNTL, interrupt_cntl);
 
 	WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
-	rb_bufsz = drm_order(rdev->ih.ring_size / 4);
+	rb_bufsz = order_base_2(rdev->ih.ring_size / 4);
 
 	ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
Line 3517... Line 3437...
 	u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
 	u32 mode_int = 0;
 	u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
 	u32 grbm_int_cntl = 0;
 	u32 hdmi0, hdmi1;
-	u32 d1grph = 0, d2grph = 0;
 	u32 dma_cntl;
+	u32 thermal_int = 0;
 
 	if (!rdev->irq.installed) {
 		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
 		return -EINVAL;
Line 3553... Line 3473...
 		hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
 		hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
 		hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
 		hdmi1 = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
 	}
 
 	dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
 
+	if ((rdev->family > CHIP_R600) && (rdev->family < CHIP_RV770)) {
+		thermal_int = RREG32(CG_THERMAL_INT) &
+			~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
+	} else if (rdev->family >= CHIP_RV770) {
+		thermal_int = RREG32(RV770_CG_THERMAL_INT) &
+			~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
+	}
+	if (rdev->irq.dpm_thermal) {
+		DRM_DEBUG("dpm thermal\n");
+		thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
+	}
 
 	if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
 		DRM_DEBUG("r600_irq_set: sw int\n");
 		cp_int_cntl |= RB_INT_ENABLE;
 		cp_int_cntl |= TIME_STAMP_INT_ENABLE;
Line 3612... Line 3546...
 	}
 
 	WREG32(CP_INT_CNTL, cp_int_cntl);
 	WREG32(DMA_CNTL, dma_cntl);
 	WREG32(DxMODE_INT_MASK, mode_int);
-	WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph);
-	WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph);
+	WREG32(D1GRPH_INTERRUPT_CONTROL, DxGRPH_PFLIP_INT_MASK);
+	WREG32(D2GRPH_INTERRUPT_CONTROL, DxGRPH_PFLIP_INT_MASK);
 	WREG32(GRBM_INT_CNTL, grbm_int_cntl);
 	if (ASIC_IS_DCE3(rdev)) {
 		WREG32(DC_HPD1_INT_CONTROL, hpd1);
 		WREG32(DC_HPD2_INT_CONTROL, hpd2);
Line 3636... Line 3569...
 		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
 		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
 		WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
 		WREG32(HDMI1_AUDIO_PACKET_CONTROL, hdmi1);
 	}
+	if ((rdev->family > CHIP_R600) && (rdev->family < CHIP_RV770)) {
+		WREG32(CG_THERMAL_INT, thermal_int);
+	} else if (rdev->family >= CHIP_RV770) {
+		WREG32(RV770_CG_THERMAL_INT, thermal_int);
+	}
 
 	return 0;
 }
Line 3785... Line 3723...
 			wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask);
 		rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
 		tmp = RREG32(IH_RB_CNTL);
 		tmp |= IH_WPTR_OVERFLOW_CLEAR;
 		WREG32(IH_RB_CNTL, tmp);
+		wptr &= ~RB_OVERFLOW;
 	}
 	return (wptr & rdev->ih.ptr_mask);
 }
 
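The overflow handling above works because the IH ring size is a power of two, so ptr_mask == ring_size - 1 and wrapping is a single AND. A tiny illustrative sketch (not driver code):

	/* e.g. with a 16 KiB ring, (16384 + 16) & 16383 == 16 */
	static inline u32 ih_wrap_sketch(u32 ptr, u32 ptr_mask)
	{
		return ptr & ptr_mask;
	}
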
Line 3829... Line 3768...
 	u32 rptr;
 	u32 src_id, src_data;
 	u32 ring_index;
 	bool queue_hotplug = false;
 	bool queue_hdmi = false;
+	bool queue_thermal = false;
 
 	if (!rdev->ih.enabled || rdev->shutdown)
 		return IRQ_NONE;
Line 3982... Line 3922...
 			default:
 				DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
 				break;
 			}
 			break;
-		case 124: /* UVD */
-			DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
-			radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
-			break;
 		case 176: /* CP_INT in ring buffer */
 		case 177: /* CP_INT in IB1 */
 		case 178: /* CP_INT in IB2 */
 			DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
 			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
Line 3996... Line 3940...
 			break;
 		case 224: /* DMA trap event */
 			DRM_DEBUG("IH: DMA trap\n");
 			radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
 			break;
+		case 230: /* thermal low to high */
+			DRM_DEBUG("IH: thermal low to high\n");
+			rdev->pm.dpm.thermal.high_to_low = false;
+			queue_thermal = true;
+			break;
+		case 231: /* thermal high to low */
+			DRM_DEBUG("IH: thermal high to low\n");
+			rdev->pm.dpm.thermal.high_to_low = true;
+			queue_thermal = true;
+			break;
 		case 233: /* GUI IDLE */
 			DRM_DEBUG("IH: GUI idle\n");
 			break;
 		default:
 			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
Line 4051... Line 4005...
 	return 0;
 #endif
 }
 
Line 4054... Line 4008...
 
 /**
- * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl
+ * r600_mmio_hdp_flush - flush Host Data Path cache via MMIO
  * rdev: radeon device structure
- * bo: buffer object struct which userspace is waiting for idle
  *
- * Some R6XX/R7XX doesn't seems to take into account HDP flush performed
- * through ring buffer, this leads to corruption in rendering, see
- * http://bugzilla.kernel.org/show_bug.cgi?id=15186 to avoid this we
- * directly perform HDP flush by writing register through MMIO.
+ * Some R6XX/R7XX don't seem to take into account HDP flushes performed
+ * through the ring buffer. This leads to corruption in rendering, see
+ * http://bugzilla.kernel.org/show_bug.cgi?id=15186 . To avoid this, we
+ * directly perform the HDP flush by writing the register through MMIO.
  */
-void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
+void r600_mmio_hdp_flush(struct radeon_device *rdev)
 {
 	/* r7xx hw bug.  write to HDP_DEBUG1 followed by fb read
 	 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL.
 	 * This seems to cause problems on some AGP cards. Just use the old