/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_mode.h"
#include "r600d.h"
#include "atom.h"
#include "avivod.h"
#include "radeon_ucode.h"

/* Firmware Names */
MODULE_FIRMWARE("radeon/R600_pfp.bin");
MODULE_FIRMWARE("radeon/R600_me.bin");
MODULE_FIRMWARE("radeon/RV610_pfp.bin");
MODULE_FIRMWARE("radeon/RV610_me.bin");
MODULE_FIRMWARE("radeon/RV630_pfp.bin");
MODULE_FIRMWARE("radeon/RV630_me.bin");
MODULE_FIRMWARE("radeon/RV620_pfp.bin");
MODULE_FIRMWARE("radeon/RV620_me.bin");
MODULE_FIRMWARE("radeon/RV635_pfp.bin");
MODULE_FIRMWARE("radeon/RV635_me.bin");
MODULE_FIRMWARE("radeon/RV670_pfp.bin");
MODULE_FIRMWARE("radeon/RV670_me.bin");
MODULE_FIRMWARE("radeon/RS780_pfp.bin");
MODULE_FIRMWARE("radeon/RS780_me.bin");
MODULE_FIRMWARE("radeon/RV770_pfp.bin");
MODULE_FIRMWARE("radeon/RV770_me.bin");
MODULE_FIRMWARE("radeon/RV770_smc.bin");
MODULE_FIRMWARE("radeon/RV730_pfp.bin");
MODULE_FIRMWARE("radeon/RV730_me.bin");
MODULE_FIRMWARE("radeon/RV730_smc.bin");
MODULE_FIRMWARE("radeon/RV740_smc.bin");
MODULE_FIRMWARE("radeon/RV710_pfp.bin");
MODULE_FIRMWARE("radeon/RV710_me.bin");
MODULE_FIRMWARE("radeon/RV710_smc.bin");
MODULE_FIRMWARE("radeon/R600_rlc.bin");
MODULE_FIRMWARE("radeon/R700_rlc.bin");
MODULE_FIRMWARE("radeon/CEDAR_pfp.bin");
MODULE_FIRMWARE("radeon/CEDAR_me.bin");
MODULE_FIRMWARE("radeon/CEDAR_rlc.bin");
MODULE_FIRMWARE("radeon/CEDAR_smc.bin");
MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin");
MODULE_FIRMWARE("radeon/REDWOOD_me.bin");
MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin");
MODULE_FIRMWARE("radeon/REDWOOD_smc.bin");
MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin");
MODULE_FIRMWARE("radeon/JUNIPER_me.bin");
MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
MODULE_FIRMWARE("radeon/JUNIPER_smc.bin");
MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
MODULE_FIRMWARE("radeon/CYPRESS_smc.bin");
MODULE_FIRMWARE("radeon/PALM_pfp.bin");
MODULE_FIRMWARE("radeon/PALM_me.bin");
MODULE_FIRMWARE("radeon/SUMO_rlc.bin");
MODULE_FIRMWARE("radeon/SUMO_pfp.bin");
MODULE_FIRMWARE("radeon/SUMO_me.bin");
MODULE_FIRMWARE("radeon/SUMO2_pfp.bin");
MODULE_FIRMWARE("radeon/SUMO2_me.bin");

static const u32 crtc_offsets[2] =
{
	0,
	AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
};

int r600_debugfs_mc_info_init(struct radeon_device *rdev);

/* r600,rv610,rv630,rv620,rv635,rv670 */
int r600_mc_wait_for_idle(struct radeon_device *rdev);
static void r600_gpu_init(struct radeon_device *rdev);
void r600_fini(struct radeon_device *rdev);
void r600_irq_disable(struct radeon_device *rdev);
static void r600_pcie_gen2_enable(struct radeon_device *rdev);
extern int evergreen_rlc_resume(struct radeon_device *rdev);
extern void rv770_set_clk_bypass_mode(struct radeon_device *rdev);

/**
 * r600_get_xclk - get the xclk
 *
 * @rdev: radeon_device pointer
 *
 * Returns the reference clock used by the gfx engine
 * (r6xx, IGPs, APUs).
 */
u32 r600_get_xclk(struct radeon_device *rdev)
{
	return rdev->clock.spll.reference_freq;
}

int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
{
	return 0;
}

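/**
 * dce3_program_fmt - set up the FMT block for dithering/truncation
 *
 * @encoder: encoder whose crtc is being programmed
 *
 * Programs FMT_BIT_DEPTH_CONTROL for the crtc feeding this encoder,
 * based on the monitor bpc and the connector's dither setting: 6 bpc
 * panels get spatial dithering or truncation, 8 bpc panels get the
 * deeper variants, and 10 bpc and above need no conversion. LVDS is
 * set up by the atom tables and analog encoders are skipped.
 */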
void dce3_program_fmt(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
	int bpc = 0;
	u32 tmp = 0;
	enum radeon_connector_dither dither = RADEON_FMT_DITHER_DISABLE;

	if (connector) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		bpc = radeon_get_monitor_bpc(connector);
		dither = radeon_connector->dither;
	}

	/* LVDS FMT is set up by atom */
	if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
		return;

	/* not needed for analog */
	if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
	    (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
		return;

	if (bpc == 0)
		return;

	switch (bpc) {
	case 6:
		if (dither == RADEON_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= FMT_SPATIAL_DITHER_EN;
		else
			tmp |= FMT_TRUNCATE_EN;
		break;
	case 8:
		if (dither == RADEON_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH);
		else
			tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH);
		break;
	case 10:
	default:
		/* not needed */
		break;
	}

	WREG32(FMT_BIT_DEPTH_CONTROL + radeon_crtc->crtc_offset, tmp);
}

/* get temperature in millidegrees */
int rv6xx_get_temp(struct radeon_device *rdev)
{
	u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >>
		ASIC_T_SHIFT;
	int actual_temp = temp & 0xff;

	/* the ASIC_T field is 9-bit two's complement; bit 8 is the sign */
	if (temp & 0x100)
		actual_temp -= 256;

	return actual_temp * 1000;
}

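/**
 * r600_pm_get_dynpm_state - select the requested dynpm power state
 *
 * @rdev: radeon_device pointer
 *
 * Picks rdev->pm.requested_power_state_index/_clock_mode_index for the
 * planned dynpm action (minimum, downclock, upclock, default). On IGPs
 * and R600 it walks the low-to-high power state array; on other asics
 * it keeps one power state and steps between its clock modes. States
 * flagged single-display-only are skipped when more than one crtc is
 * active.
 */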
void r600_pm_get_dynpm_state(struct radeon_device *rdev)
{
	int i;

	rdev->pm.dynpm_can_upclock = true;
	rdev->pm.dynpm_can_downclock = true;

	/* power state array is low to high, default is first */
	if ((rdev->flags & RADEON_IS_IGP) || (rdev->family == CHIP_R600)) {
		int min_power_state_index = 0;

		if (rdev->pm.num_power_states > 2)
			min_power_state_index = 1;

		switch (rdev->pm.dynpm_planned_action) {
		case DYNPM_ACTION_MINIMUM:
			rdev->pm.requested_power_state_index = min_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_downclock = false;
			break;
		case DYNPM_ACTION_DOWNCLOCK:
			if (rdev->pm.current_power_state_index == min_power_state_index) {
				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
				rdev->pm.dynpm_can_downclock = false;
			} else {
				if (rdev->pm.active_crtc_count > 1) {
					for (i = 0; i < rdev->pm.num_power_states; i++) {
						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
							continue;
						else if (i >= rdev->pm.current_power_state_index) {
							rdev->pm.requested_power_state_index =
								rdev->pm.current_power_state_index;
							break;
						} else {
							rdev->pm.requested_power_state_index = i;
							break;
						}
					}
				} else {
					if (rdev->pm.current_power_state_index == 0)
						rdev->pm.requested_power_state_index =
							rdev->pm.num_power_states - 1;
					else
						rdev->pm.requested_power_state_index =
							rdev->pm.current_power_state_index - 1;
				}
			}
			rdev->pm.requested_clock_mode_index = 0;
			/* don't use the power state if crtcs are active and no display flag is set */
			if ((rdev->pm.active_crtc_count > 0) &&
			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
			     clock_info[rdev->pm.requested_clock_mode_index].flags &
			     RADEON_PM_MODE_NO_DISPLAY)) {
				rdev->pm.requested_power_state_index++;
			}
			break;
		case DYNPM_ACTION_UPCLOCK:
			if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
				rdev->pm.dynpm_can_upclock = false;
			} else {
				if (rdev->pm.active_crtc_count > 1) {
					for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
							continue;
						else if (i <= rdev->pm.current_power_state_index) {
							rdev->pm.requested_power_state_index =
								rdev->pm.current_power_state_index;
							break;
						} else {
							rdev->pm.requested_power_state_index = i;
							break;
						}
					}
				} else
					rdev->pm.requested_power_state_index =
						rdev->pm.current_power_state_index + 1;
			}
			rdev->pm.requested_clock_mode_index = 0;
			break;
		case DYNPM_ACTION_DEFAULT:
			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_upclock = false;
			break;
		case DYNPM_ACTION_NONE:
		default:
			DRM_ERROR("Requested mode for undefined action\n");
			return;
		}
	} else {
		/* XXX select a power state based on AC/DC, single/dualhead, etc. */
		/* for now just select the first power state and switch between clock modes */
		/* power state array is low to high, default is first (0) */
		if (rdev->pm.active_crtc_count > 1) {
			rdev->pm.requested_power_state_index = -1;
			/* start at 1 as we don't want the default mode */
			for (i = 1; i < rdev->pm.num_power_states; i++) {
				if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
					continue;
				else if ((rdev->pm.power_state[i].type == POWER_STATE_TYPE_PERFORMANCE) ||
					 (rdev->pm.power_state[i].type == POWER_STATE_TYPE_BATTERY)) {
					rdev->pm.requested_power_state_index = i;
					break;
				}
			}
			/* if nothing selected, grab the default state. */
			if (rdev->pm.requested_power_state_index == -1)
				rdev->pm.requested_power_state_index = 0;
		} else
			rdev->pm.requested_power_state_index = 1;

		switch (rdev->pm.dynpm_planned_action) {
		case DYNPM_ACTION_MINIMUM:
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_downclock = false;
			break;
		case DYNPM_ACTION_DOWNCLOCK:
			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
				if (rdev->pm.current_clock_mode_index == 0) {
					rdev->pm.requested_clock_mode_index = 0;
					rdev->pm.dynpm_can_downclock = false;
				} else
					rdev->pm.requested_clock_mode_index =
						rdev->pm.current_clock_mode_index - 1;
			} else {
				rdev->pm.requested_clock_mode_index = 0;
				rdev->pm.dynpm_can_downclock = false;
			}
			/* don't use the power state if crtcs are active and no display flag is set */
			if ((rdev->pm.active_crtc_count > 0) &&
			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
			     clock_info[rdev->pm.requested_clock_mode_index].flags &
			     RADEON_PM_MODE_NO_DISPLAY)) {
				rdev->pm.requested_clock_mode_index++;
			}
			break;
		case DYNPM_ACTION_UPCLOCK:
			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
				if (rdev->pm.current_clock_mode_index ==
				    (rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1)) {
					rdev->pm.requested_clock_mode_index = rdev->pm.current_clock_mode_index;
					rdev->pm.dynpm_can_upclock = false;
				} else
					rdev->pm.requested_clock_mode_index =
						rdev->pm.current_clock_mode_index + 1;
			} else {
				rdev->pm.requested_clock_mode_index =
					rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1;
				rdev->pm.dynpm_can_upclock = false;
			}
			break;
		case DYNPM_ACTION_DEFAULT:
			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_upclock = false;
			break;
		case DYNPM_ACTION_NONE:
		default:
			DRM_ERROR("Requested mode for undefined action\n");
			return;
		}
	}

	DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n",
		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
		  clock_info[rdev->pm.requested_clock_mode_index].sclk,
		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
		  clock_info[rdev->pm.requested_clock_mode_index].mclk,
		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
		  pcie_lanes);
}

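/**
 * rs780_pm_init_profile - fill the pm profile table (RS780/RS880)
 *
 * @rdev: radeon_device pointer
 *
 * Fills rdev->pm.profiles[] (default, low/mid/high, single and multi
 * head) with power state and clock mode indices appropriate for the
 * number of power states the asic exposes (2, 3, or more).
 */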
void rs780_pm_init_profile(struct radeon_device *rdev)
{
	if (rdev->pm.num_power_states == 2) {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else if (rdev->pm.num_power_states == 3) {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 3;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 3;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	}
}

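/**
 * r600_pm_init_profile - fill the pm profile table (R6xx)
 *
 * @rdev: radeon_device pointer
 *
 * Fills rdev->pm.profiles[] for the profile based power management
 * code. R600 itself only gets the default state; later asics pick
 * battery or performance states via radeon_pm_get_type_index(),
 * depending on RADEON_IS_MOBILITY.
 */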
void r600_pm_init_profile(struct radeon_device *rdev)
{
	int idx;

	if (rdev->family == CHIP_R600) {
		/* XXX */
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else {
		if (rdev->pm.num_power_states < 4) {
			/* default */
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
			/* low sh */
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
			/* mid sh */
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
			/* high sh */
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
			/* low mh */
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
			/* mid mh */
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
			/* high mh */
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
		} else {
			/* default */
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
			/* low sh */
			if (rdev->flags & RADEON_IS_MOBILITY)
				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
			else
				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
			/* mid sh */
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
			/* high sh */
			idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
			/* low mh */
			if (rdev->flags & RADEON_IS_MOBILITY)
				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
			else
				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
			/* mid mh */
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
			/* high mh */
			idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
		}
	}
}

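/**
 * r600_pm_misc - apply non-clock pm settings
 *
 * @rdev: radeon_device pointer
 *
 * Applies the voltage for the requested power state via
 * radeon_atom_set_voltage() when it differs from the current vddc.
 * The value 0xff01 is a "use default" flag, not a real voltage.
 */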
void r600_pm_misc(struct radeon_device *rdev)
{
	int req_ps_idx = rdev->pm.requested_power_state_index;
	int req_cm_idx = rdev->pm.requested_clock_mode_index;
	struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
	struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;

	if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
		/* 0xff01 is a flag rather than an actual voltage */
		if (voltage->voltage == 0xff01)
			return;
		if (voltage->voltage != rdev->pm.current_vddc) {
			radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
			rdev->pm.current_vddc = voltage->voltage;
			DRM_DEBUG_DRIVER("Setting: v: %d\n", voltage->voltage);
		}
	}
}

bool r600_gui_idle(struct radeon_device *rdev)
{
	return !(RREG32(GRBM_STATUS) & GUI_ACTIVE);
}

/* hpd for digital panel detect/disconnect */
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
	bool connected = false;

	if (ASIC_IS_DCE3(rdev)) {
		switch (hpd) {
		case RADEON_HPD_1:
			if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_2:
			if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_3:
			if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_4:
			if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
			/* DCE 3.2 */
		case RADEON_HPD_5:
			if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_6:
			if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		default:
			break;
		}
	} else {
		switch (hpd) {
		case RADEON_HPD_1:
			if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_2:
			if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_3:
			if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		default:
			break;
		}
	}
	return connected;
}

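/**
 * r600_hpd_set_polarity - program the hpd interrupt polarity
 *
 * @rdev: radeon_device pointer
 * @hpd: hpd pin to program
 *
 * Sets the interrupt polarity for the given hpd pin so that the next
 * interrupt fires on the opposite of the currently sensed state
 * (connect vs. disconnect).
 */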
void r600_hpd_set_polarity(struct radeon_device *rdev,
			   enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = r600_hpd_sense(rdev, hpd);

	if (ASIC_IS_DCE3(rdev)) {
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HPD1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HPD2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HPD3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD3_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_4:
			tmp = RREG32(DC_HPD4_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD4_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_5:
			tmp = RREG32(DC_HPD5_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
			break;
			/* DCE 3.2 */
		case RADEON_HPD_6:
			tmp = RREG32(DC_HPD6_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	} else {
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	}
}

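/**
 * r600_hpd_init - enable hpd pins
 *
 * @rdev: radeon_device pointer
 *
 * Enables the hpd pin of each connector (skipping eDP/LVDS) and sets
 * the initial interrupt polarity. In this port the final
 * radeon_irq_kms_enable_hpd() call is stubbed out.
 */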
void r600_hpd_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;
	unsigned enable = 0;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);

		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
			/* don't try to enable hpd on eDP or LVDS to avoid breaking
			 * the aux dp channel on imac; this helps (but does not
			 * completely fix)
			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
			 */
			continue;
		}
		if (ASIC_IS_DCE3(rdev)) {
			u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
			if (ASIC_IS_DCE32(rdev))
				tmp |= DC_HPDx_EN;

			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, tmp);
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, tmp);
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, tmp);
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, tmp);
				break;
				/* DCE 3.2 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, tmp);
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, tmp);
				break;
			default:
				break;
			}
		} else {
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				break;
			default:
				break;
			}
		}
		enable |= 1 << radeon_connector->hpd.hpd;
		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
	}
//	radeon_irq_kms_enable_hpd(rdev, enable);
}

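/**
 * r600_hpd_fini - disable hpd pins
 *
 * @rdev: radeon_device pointer
 *
 * Disables the hpd pin of each connector. In this port the final
 * radeon_irq_kms_disable_hpd() call is stubbed out.
 */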
void r600_hpd_fini(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;
	unsigned disable = 0;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		if (ASIC_IS_DCE3(rdev)) {
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, 0);
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, 0);
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, 0);
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, 0);
				break;
				/* DCE 3.2 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, 0);
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, 0);
				break;
			default:
				break;
			}
		} else {
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
				break;
			default:
				break;
			}
		}
		disable |= 1 << radeon_connector->hpd.hpd;
	}
//	radeon_irq_kms_disable_hpd(rdev, disable);
}

/*
 * R600 PCIE GART
 */
void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	/* flush hdp cache so updates hit vram */
	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
	    !(rdev->flags & RADEON_IS_AGP)) {
		void __iomem *ptr = (void *)rdev->gart.ptr;
		u32 tmp;

		/* r7xx hw bug.  write to HDP_DEBUG1 followed by fb read
		 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL
		 * This seems to cause problems on some AGP cards. Just use the old
		 * method for them.
		 */
		WREG32(HDP_DEBUG1, 0);
		tmp = readl((void __iomem *)ptr);
	} else
		WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
		if (tmp == 2) {
			printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
			return;
		}
		if (tmp) {
			return;
		}
		udelay(1);
	}
}

int r600_pcie_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.robj) {
		WARN(1, "R600 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r)
		return r;
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
	return radeon_gart_table_vram_alloc(rdev);
}

static int r600_pcie_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r, i;

	if (rdev->gart.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(rdev->dummy_page.addr >> 12));
	for (i = 1; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	r600_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}

static void r600_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Disable all tables */
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	/* Disable L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup L1 TLB control */
	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	radeon_gart_table_vram_unpin(rdev);
}

static void r600_pcie_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	r600_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
}

static void r600_agp_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
}

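/**
 * r600_mc_wait_for_idle - wait for the memory controller to go idle
 *
 * @rdev: radeon_device pointer
 *
 * Polls the MC busy bits in SRBM_STATUS once per microsecond, up to
 * rdev->usec_timeout. Returns 0 when idle, -1 on timeout.
 */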
int r600_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(R_000E50_SRBM_STATUS) & 0x3F00;
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -1;
}

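/* RS780/RS880 MC registers live behind an index/data pair rather than
 * in the direct register aperture: write the register offset into
 * MC_INDEX (setting MC_IND_WR_EN for writes), then move the payload
 * through MC_DATA, e.g. rs780_mc_wreg(rdev, R_000011_K8_FB_LOCATION, v).
 * The mc_idx_lock spinlock keeps concurrent accessors from interleaving
 * the index/data sequence.
 */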
uint32_t rs780_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
	unsigned long flags;
	uint32_t r;

	spin_lock_irqsave(&rdev->mc_idx_lock, flags);
	WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg));
	r = RREG32(R_0028FC_MC_DATA);
	WREG32(R_0028F8_MC_INDEX, ~C_0028F8_MC_IND_ADDR);
	spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
	return r;
}

void rs780_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	unsigned long flags;

	spin_lock_irqsave(&rdev->mc_idx_lock, flags);
	WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg) |
		S_0028F8_MC_IND_WR_EN(1));
	WREG32(R_0028FC_MC_DATA, v);
	WREG32(R_0028F8_MC_INDEX, 0x7F);
	spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
}

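/**
 * r600_mc_program - program the memory controller address ranges
 *
 * @rdev: radeon_device pointer
 *
 * Stops the MC, waits for it to idle, then programs the VRAM and AGP
 * apertures (keeping VRAM and GTT adjacent on AGP), the HDP ranges,
 * and the system aperture, before resuming the MC and disabling the
 * VGA renderer so it cannot scribble over our objects.
 */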
static void r600_mc_program(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Lockout access through VGA aperture (doesn't exist before R600) */
	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	if (rdev->flags & RADEON_IS_AGP) {
		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
			/* VRAM before AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.vram_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.gtt_end >> 12);
		} else {
			/* VRAM after AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.gtt_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.vram_end >> 12);
		}
	} else {
		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
	}
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
	} else {
		WREG32(MC_VM_AGP_BASE, 0);
		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	}
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	rv515_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it overwriting our objects */
	rv515_vga_render_disable(rdev);
}

/**
 * r600_vram_gtt_location - try to find VRAM & GTT location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Try to place VRAM at the same offset as in the CPU (PCI) address
 * space, as some GPUs seem to have issues when it is reprogrammed to
 * a different address space.
 *
 * If there is not enough space to fit the invisible VRAM after the
 * aperture then we limit the VRAM size to the aperture.
 *
 * If we are using AGP, place VRAM adjacent to the AGP aperture; the
 * GPU needs to see them as one range so that we can program it to
 * catch accesses outside them (weird GPU policy, see ??).
 *
 * This function never fails; the worst case is limiting VRAM or GTT.
 *
 * Note: GTT start, end, size should be initialized before calling this
 * function on AGP platform.
 */
static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_bf, size_af;

	if (mc->mc_vram_size > 0xE0000000) {
		/* leave room for at least 512M GTT */
		dev_warn(rdev->dev, "limiting VRAM\n");
		mc->real_vram_size = 0xE0000000;
		mc->mc_vram_size = 0xE0000000;
	}
	if (rdev->flags & RADEON_IS_AGP) {
		size_bf = mc->gtt_start;
		size_af = mc->mc_mask - mc->gtt_end;
		if (size_bf > size_af) {
			if (mc->mc_vram_size > size_bf) {
				dev_warn(rdev->dev, "limiting VRAM\n");
				mc->real_vram_size = size_bf;
				mc->mc_vram_size = size_bf;
			}
			mc->vram_start = mc->gtt_start - mc->mc_vram_size;
		} else {
			if (mc->mc_vram_size > size_af) {
				dev_warn(rdev->dev, "limiting VRAM\n");
				mc->real_vram_size = size_af;
				mc->mc_vram_size = size_af;
			}
			mc->vram_start = mc->gtt_end + 1;
		}
		mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
		dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
				mc->mc_vram_size >> 20, mc->vram_start,
				mc->vram_end, mc->real_vram_size >> 20);
	} else {
		u64 base = 0;
		if (rdev->flags & RADEON_IS_IGP) {
			base = RREG32(MC_VM_FB_LOCATION) & 0xFFFF;
			base <<= 24;
		}
		radeon_vram_location(rdev, &rdev->mc, base);
		rdev->mc.gtt_base_align = 0;
		radeon_gtt_location(rdev, mc);
	}
}

static int r600_mc_init(struct radeon_device *rdev)
{
	u32 tmp;
	int chansize, numchan;
	uint32_t h_addr, l_addr;
	unsigned long long k8_addr;

	/* Get VRAM information */
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32(RAMCFG);
	if (tmp & CHANSIZE_OVERRIDE) {
		chansize = 16;
	} else if (tmp & CHANSIZE_MASK) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	tmp = RREG32(CHMAP);
	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	}
	rdev->mc.vram_width = numchan * chansize;
	/* Could aper size report 0? */
	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
	/* Setup GPU memory space */
	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.visible_vram_size = rdev->mc.aper_size;
	r600_vram_gtt_location(rdev, &rdev->mc);

	if (rdev->flags & RADEON_IS_IGP) {
		rs690_pm_info(rdev);
		rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);

		if (rdev->family == CHIP_RS780 || rdev->family == CHIP_RS880) {
			/* Use K8 direct mapping for fast fb access. */
			rdev->fastfb_working = false;
			h_addr = G_000012_K8_ADDR_EXT(RREG32_MC(R_000012_MC_MISC_UMA_CNTL));
			l_addr = RREG32_MC(R_000011_K8_FB_LOCATION);
			k8_addr = ((unsigned long long)h_addr) << 32 | l_addr;
#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
			if (k8_addr + rdev->mc.visible_vram_size < 0x100000000ULL)
#endif
			{
				/* FastFB shall be used with UMA memory. Here it is simply
				 * disabled when sideport memory is present.
				 */
				if (rdev->mc.igp_sideport_enabled == false && radeon_fastfb == 1) {
					DRM_INFO("Direct mapping: aper base at 0x%llx, replaced by direct mapping base 0x%llx.\n",
						(unsigned long long)rdev->mc.aper_base, k8_addr);
					rdev->mc.aper_base = (resource_size_t)k8_addr;
					rdev->fastfb_working = true;
				}
			}
		}
	}

	radeon_update_bandwidth_info(rdev);
	return 0;
}

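/**
 * r600_vram_scratch_init - allocate the VRAM scratch page
 *
 * @rdev: radeon_device pointer
 *
 * Creates, pins, and kmaps one GPU page in VRAM. r600_mc_program()
 * points MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR at it, so stray MC
 * accesses land somewhere harmless.
 */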
int r600_vram_scratch_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->vram_scratch.robj == NULL) {
		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
				     PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
				     0, NULL, &rdev->vram_scratch.robj);
		if (r) {
			return r;
		}
	}

	r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_pin(rdev->vram_scratch.robj,
			  RADEON_GEM_DOMAIN_VRAM, &rdev->vram_scratch.gpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->vram_scratch.robj);
		return r;
	}
	r = radeon_bo_kmap(rdev->vram_scratch.robj,
				(void **)&rdev->vram_scratch.ptr);
	if (r)
		radeon_bo_unpin(rdev->vram_scratch.robj);
	radeon_bo_unreserve(rdev->vram_scratch.robj);

	return r;
}

void r600_vram_scratch_fini(struct radeon_device *rdev)
{
	int r;

	if (rdev->vram_scratch.robj == NULL) {
		return;
	}
	r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
	if (likely(r == 0)) {
		radeon_bo_kunmap(rdev->vram_scratch.robj);
		radeon_bo_unpin(rdev->vram_scratch.robj);
		radeon_bo_unreserve(rdev->vram_scratch.robj);
	}
	radeon_bo_unref(&rdev->vram_scratch.robj);
}

void r600_set_bios_scratch_engine_hung(struct radeon_device *rdev, bool hung)
{
	u32 tmp = RREG32(R600_BIOS_3_SCRATCH);

	if (hung)
		tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
	else
		tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;

	WREG32(R600_BIOS_3_SCRATCH, tmp);
}

static void r600_print_gpu_status_regs(struct radeon_device *rdev)
{
	dev_info(rdev->dev, "  R_008010_GRBM_STATUS      = 0x%08X\n",
		RREG32(R_008010_GRBM_STATUS));
	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2     = 0x%08X\n",
		RREG32(R_008014_GRBM_STATUS2));
	dev_info(rdev->dev, "  R_000E50_SRBM_STATUS      = 0x%08X\n",
		RREG32(R_000E50_SRBM_STATUS));
	dev_info(rdev->dev, "  R_008674_CP_STALLED_STAT1 = 0x%08X\n",
		RREG32(CP_STALLED_STAT1));
	dev_info(rdev->dev, "  R_008678_CP_STALLED_STAT2 = 0x%08X\n",
		RREG32(CP_STALLED_STAT2));
	dev_info(rdev->dev, "  R_00867C_CP_BUSY_STAT     = 0x%08X\n",
		RREG32(CP_BUSY_STAT));
	dev_info(rdev->dev, "  R_008680_CP_STAT          = 0x%08X\n",
		RREG32(CP_STAT));
	dev_info(rdev->dev, "  R_00D034_DMA_STATUS_REG   = 0x%08X\n",
		RREG32(DMA_STATUS_REG));
}

3764 Serge 1413
static bool r600_is_display_hung(struct radeon_device *rdev)
1414
{
1415
	u32 crtc_hung = 0;
1416
	u32 crtc_status[2];
1417
	u32 i, j, tmp;
1418
 
1419
	for (i = 0; i < rdev->num_crtc; i++) {
1420
		if (RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]) & AVIVO_CRTC_EN) {
1421
			crtc_status[i] = RREG32(AVIVO_D1CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
1422
			crtc_hung |= (1 << i);
1423
		}
1424
	}
1425
 
1426
	for (j = 0; j < 10; j++) {
1427
		for (i = 0; i < rdev->num_crtc; i++) {
1428
			if (crtc_hung & (1 << i)) {
1429
				tmp = RREG32(AVIVO_D1CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
1430
				if (tmp != crtc_status[i])
1431
					crtc_hung &= ~(1 << i);
1432
			}
1433
		}
1434
		if (crtc_hung == 0)
1435
			return false;
1436
		udelay(100);
1437
	}
1438
 
1439
	return true;
1440
}
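
/* Illustrative sketch (not in the original file): the hang test above
 * reduces to "does a free-running counter still advance?".  A generic
 * helper for a single status register could look like this; the register
 * argument and the 10 x 100us sampling window are assumptions borrowed
 * from the function above.
 */
static bool r600_counter_is_stuck_example(struct radeon_device *rdev, u32 reg)
{
	u32 first = RREG32(reg);
	unsigned i;

	for (i = 0; i < 10; i++) {
		udelay(100);
		if (RREG32(reg) != first)
			return false; /* counter moved, not hung */
	}
	return true; /* no movement in ~1ms, treat as hung */
}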
1441
 
5078 serge 1442
u32 r600_gpu_check_soft_reset(struct radeon_device *rdev)
3764 Serge 1443
{
1444
	u32 reset_mask = 0;
1445
	u32 tmp;
1446
 
1447
	/* GRBM_STATUS */
1448
	tmp = RREG32(R_008010_GRBM_STATUS);
1449
	if (rdev->family >= CHIP_RV770) {
1450
		if (G_008010_PA_BUSY(tmp) | G_008010_SC_BUSY(tmp) |
1451
		    G_008010_SH_BUSY(tmp) | G_008010_SX_BUSY(tmp) |
1452
		    G_008010_TA_BUSY(tmp) | G_008010_VGT_BUSY(tmp) |
1453
		    G_008010_DB03_BUSY(tmp) | G_008010_CB03_BUSY(tmp) |
1454
		    G_008010_SPI03_BUSY(tmp) | G_008010_VGT_BUSY_NO_DMA(tmp))
1455
			reset_mask |= RADEON_RESET_GFX;
1456
	} else {
1457
		if (G_008010_PA_BUSY(tmp) | G_008010_SC_BUSY(tmp) |
1458
		    G_008010_SH_BUSY(tmp) | G_008010_SX_BUSY(tmp) |
1459
		    G_008010_TA03_BUSY(tmp) | G_008010_VGT_BUSY(tmp) |
1460
		    G_008010_DB03_BUSY(tmp) | G_008010_CB03_BUSY(tmp) |
1461
		    G_008010_SPI03_BUSY(tmp) | G_008010_VGT_BUSY_NO_DMA(tmp))
1462
			reset_mask |= RADEON_RESET_GFX;
1463
	}
1464
 
1465
	if (G_008010_CF_RQ_PENDING(tmp) | G_008010_PF_RQ_PENDING(tmp) |
1466
	    G_008010_CP_BUSY(tmp) | G_008010_CP_COHERENCY_BUSY(tmp))
1467
		reset_mask |= RADEON_RESET_CP;
1468
 
1469
	if (G_008010_GRBM_EE_BUSY(tmp))
1470
		reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;
1471
 
1472
	/* DMA_STATUS_REG */
1473
	tmp = RREG32(DMA_STATUS_REG);
1474
	if (!(tmp & DMA_IDLE))
1475
		reset_mask |= RADEON_RESET_DMA;
1476
 
1477
	/* SRBM_STATUS */
1478
	tmp = RREG32(R_000E50_SRBM_STATUS);
1479
	if (G_000E50_RLC_RQ_PENDING(tmp) | G_000E50_RLC_BUSY(tmp))
1480
		reset_mask |= RADEON_RESET_RLC;
1481
 
1482
	if (G_000E50_IH_BUSY(tmp))
1483
		reset_mask |= RADEON_RESET_IH;
1484
 
1485
	if (G_000E50_SEM_BUSY(tmp))
1486
		reset_mask |= RADEON_RESET_SEM;
1487
 
1488
	if (G_000E50_GRBM_RQ_PENDING(tmp))
1489
		reset_mask |= RADEON_RESET_GRBM;
1490
 
1491
	if (G_000E50_VMC_BUSY(tmp))
1492
		reset_mask |= RADEON_RESET_VMC;
1493
 
1494
	if (G_000E50_MCB_BUSY(tmp) | G_000E50_MCDZ_BUSY(tmp) |
1495
	    G_000E50_MCDY_BUSY(tmp) | G_000E50_MCDX_BUSY(tmp) |
1496
	    G_000E50_MCDW_BUSY(tmp))
1497
		reset_mask |= RADEON_RESET_MC;
1498
 
1499
	if (r600_is_display_hung(rdev))
1500
		reset_mask |= RADEON_RESET_DISPLAY;
1501
 
1502
	/* Skip MC reset as it's most likely not hung, just busy */
1503
	if (reset_mask & RADEON_RESET_MC) {
1504
		DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
1505
		reset_mask &= ~RADEON_RESET_MC;
1506
	}
1507
 
1508
	return reset_mask;
1509
}
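
/* Illustrative sketch: callers treat the return value as a bitmask of
 * RADEON_RESET_* flags.  A hypothetical helper that asks whether only the
 * DMA engine is wedged would be:
 */
static bool r600_only_dma_hung_example(struct radeon_device *rdev)
{
	u32 mask = r600_gpu_check_soft_reset(rdev);

	return mask == RADEON_RESET_DMA;
}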
1510
 
1511
static void r600_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
1512
{
1513
	struct rv515_mc_save save;
1514
	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
1515
	u32 tmp;
1516
 
1517
	if (reset_mask == 0)
1518
		return;
1519
 
1520
	dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
1521
 
1522
	r600_print_gpu_status_regs(rdev);
1523
 
1221 serge 1524
	/* Disable CP parsing/prefetching */
3764 Serge 1525
	if (rdev->family >= CHIP_RV770)
1526
		WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1) | S_0086D8_CP_PFP_HALT(1));
1527
	else
1963 serge 1528
		WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
3192 Serge 1529
 
3764 Serge 1530
	/* disable the RLC */
1531
	WREG32(RLC_CNTL, 0);
1532
 
1533
	if (reset_mask & RADEON_RESET_DMA) {
1534
		/* Disable DMA */
1535
		tmp = RREG32(DMA_RB_CNTL);
1536
		tmp &= ~DMA_RB_ENABLE;
1537
		WREG32(DMA_RB_CNTL, tmp);
1538
	}
1539
 
1540
	mdelay(50);
1541
 
1542
	rv515_mc_stop(rdev, &save);
1543
	if (r600_mc_wait_for_idle(rdev)) {
1544
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
1545
	}
1546
 
1547
	if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) {
1548
		if (rdev->family >= CHIP_RV770)
1549
			grbm_soft_reset |= S_008020_SOFT_RESET_DB(1) |
1550
				S_008020_SOFT_RESET_CB(1) |
1551
				S_008020_SOFT_RESET_PA(1) |
1552
				S_008020_SOFT_RESET_SC(1) |
1553
				S_008020_SOFT_RESET_SPI(1) |
1554
				S_008020_SOFT_RESET_SX(1) |
1555
				S_008020_SOFT_RESET_SH(1) |
1556
				S_008020_SOFT_RESET_TC(1) |
1557
				S_008020_SOFT_RESET_TA(1) |
1558
				S_008020_SOFT_RESET_VC(1) |
1559
				S_008020_SOFT_RESET_VGT(1);
1560
		else
1561
			grbm_soft_reset |= S_008020_SOFT_RESET_CR(1) |
1221 serge 1562
				S_008020_SOFT_RESET_DB(1) |
1563
				S_008020_SOFT_RESET_CB(1) |
1564
				S_008020_SOFT_RESET_PA(1) |
1565
				S_008020_SOFT_RESET_SC(1) |
1566
				S_008020_SOFT_RESET_SMX(1) |
1567
				S_008020_SOFT_RESET_SPI(1) |
1568
				S_008020_SOFT_RESET_SX(1) |
1569
				S_008020_SOFT_RESET_SH(1) |
1570
				S_008020_SOFT_RESET_TC(1) |
1571
				S_008020_SOFT_RESET_TA(1) |
1572
				S_008020_SOFT_RESET_VC(1) |
1573
				S_008020_SOFT_RESET_VGT(1);
1574
	}
3764 Serge 1575
 
1576
	if (reset_mask & RADEON_RESET_CP) {
1577
		grbm_soft_reset |= S_008020_SOFT_RESET_CP(1) |
1578
			S_008020_SOFT_RESET_VGT(1);
1579
 
1580
		srbm_soft_reset |= S_000E60_SOFT_RESET_GRBM(1);
1581
	}
1582
 
1583
	if (reset_mask & RADEON_RESET_DMA) {
1584
		if (rdev->family >= CHIP_RV770)
1585
			srbm_soft_reset |= RV770_SOFT_RESET_DMA;
1586
		else
1587
			srbm_soft_reset |= SOFT_RESET_DMA;
1588
	}
1589
 
1590
	if (reset_mask & RADEON_RESET_RLC)
1591
		srbm_soft_reset |= S_000E60_SOFT_RESET_RLC(1);
1592
 
1593
	if (reset_mask & RADEON_RESET_SEM)
1594
		srbm_soft_reset |= S_000E60_SOFT_RESET_SEM(1);
1595
 
1596
	if (reset_mask & RADEON_RESET_IH)
1597
		srbm_soft_reset |= S_000E60_SOFT_RESET_IH(1);
1598
 
1599
	if (reset_mask & RADEON_RESET_GRBM)
1600
		srbm_soft_reset |= S_000E60_SOFT_RESET_GRBM(1);
1601
 
1602
	if (!(rdev->flags & RADEON_IS_IGP)) {
1603
		if (reset_mask & RADEON_RESET_MC)
1604
			srbm_soft_reset |= S_000E60_SOFT_RESET_MC(1);
1605
	}
1606
 
1607
	if (reset_mask & RADEON_RESET_VMC)
1608
		srbm_soft_reset |= S_000E60_SOFT_RESET_VMC(1);
1609
 
1610
	if (grbm_soft_reset) {
1611
		tmp = RREG32(R_008020_GRBM_SOFT_RESET);
1612
		tmp |= grbm_soft_reset;
1221 serge 1613
		dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
1614
		WREG32(R_008020_GRBM_SOFT_RESET, tmp);
3764 Serge 1615
		tmp = RREG32(R_008020_GRBM_SOFT_RESET);
3192 Serge 1616
 
3764 Serge 1617
		udelay(50);
3192 Serge 1618
 
3764 Serge 1619
		tmp &= ~grbm_soft_reset;
1620
		WREG32(R_008020_GRBM_SOFT_RESET, tmp);
1621
		tmp = RREG32(R_008020_GRBM_SOFT_RESET);
1622
	}
3192 Serge 1623
 
3764 Serge 1624
	if (srbm_soft_reset) {
1625
		tmp = RREG32(SRBM_SOFT_RESET);
1626
		tmp |= srbm_soft_reset;
1627
		dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
1628
		WREG32(SRBM_SOFT_RESET, tmp);
1629
		tmp = RREG32(SRBM_SOFT_RESET);
3192 Serge 1630
 
3764 Serge 1631
		udelay(50);
3192 Serge 1632
 
3764 Serge 1633
		tmp &= ~srbm_soft_reset;
1634
		WREG32(SRBM_SOFT_RESET, tmp);
1635
		tmp = RREG32(SRBM_SOFT_RESET);
1636
	}
3192 Serge 1637
 
3764 Serge 1638
	/* Wait a little for things to settle down */
1639
	mdelay(1);
3192 Serge 1640
 
3764 Serge 1641
	rv515_mc_resume(rdev, &save);
3192 Serge 1642
	udelay(50);
1643
 
3764 Serge 1644
	r600_print_gpu_status_regs(rdev);
3192 Serge 1645
}
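
/* Illustrative note: each soft-reset block above follows the same write
 * discipline - set the reset bits, read the register back to post the
 * write, hold the reset for ~50us, clear the bits, and read back again.
 */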
1646
 
5078 serge 1647
static void r600_gpu_pci_config_reset(struct radeon_device *rdev)
1648
{
1649
	struct rv515_mc_save save;
1650
	u32 tmp, i;
1651
 
1652
	dev_info(rdev->dev, "GPU pci config reset\n");
1653
 
1654
	/* disable dpm? */
1655
 
1656
	/* Disable CP parsing/prefetching */
1657
	if (rdev->family >= CHIP_RV770)
1658
		WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1) | S_0086D8_CP_PFP_HALT(1));
1659
	else
1660
		WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
1661
 
1662
	/* disable the RLC */
1663
	WREG32(RLC_CNTL, 0);
1664
 
1665
	/* Disable DMA */
1666
	tmp = RREG32(DMA_RB_CNTL);
1667
	tmp &= ~DMA_RB_ENABLE;
1668
	WREG32(DMA_RB_CNTL, tmp);
1669
 
1670
	mdelay(50);
1671
 
1672
	/* set mclk/sclk to bypass */
1673
	if (rdev->family >= CHIP_RV770)
1674
		rv770_set_clk_bypass_mode(rdev);
1675
	/* disable BM */
1676
	pci_clear_master(rdev->pdev);
1677
	/* disable mem access */
1678
	rv515_mc_stop(rdev, &save);
1679
	if (r600_mc_wait_for_idle(rdev)) {
1680
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
1681
	}
1682
 
1683
	/* BIF reset workaround.  Not sure if this is needed on 6xx */
1684
	tmp = RREG32(BUS_CNTL);
1685
	tmp |= VGA_COHE_SPEC_TIMER_DIS;
1686
	WREG32(BUS_CNTL, tmp);
1687
 
1688
	tmp = RREG32(BIF_SCRATCH0);
1689
 
1690
	/* reset */
1691
	radeon_pci_config_reset(rdev);
1692
	mdelay(1);
1693
 
1694
	/* BIF reset workaround.  Not sure if this is needed on 6xx */
1695
	tmp = SOFT_RESET_BIF;
1696
	WREG32(SRBM_SOFT_RESET, tmp);
1697
	mdelay(1);
1698
	WREG32(SRBM_SOFT_RESET, 0);
1699
 
1700
	/* wait for asic to come out of reset */
1701
	for (i = 0; i < rdev->usec_timeout; i++) {
1702
		if (RREG32(CONFIG_MEMSIZE) != 0xffffffff)
1703
			break;
1704
		udelay(1);
1705
	}
1706
}
1707
 
3764 Serge 1708
int r600_asic_reset(struct radeon_device *rdev)
3192 Serge 1709
{
3764 Serge 1710
	u32 reset_mask;
3192 Serge 1711
 
3764 Serge 1712
	reset_mask = r600_gpu_check_soft_reset(rdev);
3192 Serge 1713
 
3764 Serge 1714
	if (reset_mask)
1715
		r600_set_bios_scratch_engine_hung(rdev, true);
3192 Serge 1716
 
5078 serge 1717
	/* try soft reset */
3764 Serge 1718
	r600_gpu_soft_reset(rdev, reset_mask);
3192 Serge 1719
 
3764 Serge 1720
	reset_mask = r600_gpu_check_soft_reset(rdev);
3192 Serge 1721
 
5078 serge 1722
	/* try pci config reset */
1723
	if (reset_mask && radeon_hard_reset)
1724
		r600_gpu_pci_config_reset(rdev);
1725
 
1726
	reset_mask = r600_gpu_check_soft_reset(rdev);
1727
 
3764 Serge 1728
	if (!reset_mask)
1729
		r600_set_bios_scratch_engine_hung(rdev, false);
3192 Serge 1730
 
1221 serge 1731
	return 0;
1128 serge 1732
}
1733
 
3764 Serge 1734
/**
1735
 * r600_gfx_is_lockup - Check if the GFX engine is locked up
1736
 *
1737
 * @rdev: radeon_device pointer
1738
 * @ring: radeon_ring structure holding ring information
1739
 *
1740
 * Check if the GFX engine is locked up.
1741
 * Returns true if the engine appears to be locked up, false if not.
1742
 */
1743
bool r600_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
1221 serge 1744
{
3764 Serge 1745
	u32 reset_mask = r600_gpu_check_soft_reset(rdev);
1963 serge 1746
 
3764 Serge 1747
	if (!(reset_mask & (RADEON_RESET_GFX |
1748
			    RADEON_RESET_COMPUTE |
1749
			    RADEON_RESET_CP))) {
5078 serge 1750
		radeon_ring_lockup_update(rdev, ring);
1963 serge 1751
		return false;
1752
	}
2997 Serge 1753
	return radeon_ring_test_lockup(rdev, ring);
1963 serge 1754
}
1755
 
2997 Serge 1756
u32 r6xx_remap_render_backend(struct radeon_device *rdev,
1757
			      u32 tiling_pipe_num,
1758
			      u32 max_rb_num,
1759
			      u32 total_max_rb_num,
1760
			      u32 disabled_rb_mask)
1221 serge 1761
{
2997 Serge 1762
	u32 rendering_pipe_num, rb_num_width, req_rb_num;
3764 Serge 1763
	u32 pipe_rb_ratio, pipe_rb_remain, tmp;
2997 Serge 1764
	u32 data = 0, mask = 1 << (max_rb_num - 1);
1765
	unsigned i, j;
1221 serge 1766
 
2997 Serge 1767
	/* mask out the RBs that don't exist on that asic */
3764 Serge 1768
	tmp = disabled_rb_mask | ((0xff << max_rb_num) & 0xff);
1769
	/* make sure at least one RB is available */
1770
	if ((tmp & 0xff) != 0xff)
1771
		disabled_rb_mask = tmp;
1221 serge 1772
 
2997 Serge 1773
	rendering_pipe_num = 1 << tiling_pipe_num;
1774
	req_rb_num = total_max_rb_num - r600_count_pipe_bits(disabled_rb_mask);
1775
	BUG_ON(rendering_pipe_num < req_rb_num);
1221 serge 1776
 
2997 Serge 1777
	pipe_rb_ratio = rendering_pipe_num / req_rb_num;
1778
	pipe_rb_remain = rendering_pipe_num - pipe_rb_ratio * req_rb_num;
1221 serge 1779
 
2997 Serge 1780
	if (rdev->family <= CHIP_RV740) {
1781
		/* r6xx/r7xx */
1782
		rb_num_width = 2;
1783
	} else {
1784
		/* eg+ */
1785
		rb_num_width = 4;
1786
	}
1221 serge 1787
 
2997 Serge 1788
	for (i = 0; i < max_rb_num; i++) {
1789
		if (!(mask & disabled_rb_mask)) {
1790
			for (j = 0; j < pipe_rb_ratio; j++) {
1791
				data <<= rb_num_width;
1792
				data |= max_rb_num - i - 1;
1221 serge 1793
			}
2997 Serge 1794
			if (pipe_rb_remain) {
1795
				data <<= rb_num_width;
1796
				data |= max_rb_num - i - 1;
1797
				pipe_rb_remain--;
1221 serge 1798
			}
2997 Serge 1799
		}
1800
		mask >>= 1;
1801
	}
1221 serge 1802
 
2997 Serge 1803
	return data;
1221 serge 1804
}
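
/* Worked example (illustrative, values assumed): with tiling_pipe_num = 2
 * (4 rendering pipes), max_rb_num = 4 and only RB1 disabled, req_rb_num is
 * 8 - 5 = 3 (the four nonexistent RBs count as disabled), pipe_rb_ratio is
 * 1 and pipe_rb_remain is 1.  The loop packs backends 3, 3, 2, 0 into
 * 2-bit fields, so RB3 absorbs the remainder and data ends up 0xf8.
 */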
1805
 
1806
int r600_count_pipe_bits(uint32_t val)
1807
{
3192 Serge 1808
	return hweight32(val);
1221 serge 1809
}
1810
 
2997 Serge 1811
static void r600_gpu_init(struct radeon_device *rdev)
1221 serge 1812
{
1813
	u32 tiling_config;
1814
	u32 ramcfg;
1430 serge 1815
	u32 cc_gc_shader_pipe_config;
1221 serge 1816
	u32 tmp;
1817
	int i, j;
1818
	u32 sq_config;
1819
	u32 sq_gpr_resource_mgmt_1 = 0;
1820
	u32 sq_gpr_resource_mgmt_2 = 0;
1821
	u32 sq_thread_resource_mgmt = 0;
1822
	u32 sq_stack_resource_mgmt_1 = 0;
1823
	u32 sq_stack_resource_mgmt_2 = 0;
2997 Serge 1824
	u32 disabled_rb_mask;
1221 serge 1825
 
2997 Serge 1826
	rdev->config.r600.tiling_group_size = 256;
1221 serge 1827
	switch (rdev->family) {
1828
	case CHIP_R600:
1829
		rdev->config.r600.max_pipes = 4;
1830
		rdev->config.r600.max_tile_pipes = 8;
1831
		rdev->config.r600.max_simds = 4;
1832
		rdev->config.r600.max_backends = 4;
1833
		rdev->config.r600.max_gprs = 256;
1834
		rdev->config.r600.max_threads = 192;
1835
		rdev->config.r600.max_stack_entries = 256;
1836
		rdev->config.r600.max_hw_contexts = 8;
1837
		rdev->config.r600.max_gs_threads = 16;
1838
		rdev->config.r600.sx_max_export_size = 128;
1839
		rdev->config.r600.sx_max_export_pos_size = 16;
1840
		rdev->config.r600.sx_max_export_smx_size = 128;
1841
		rdev->config.r600.sq_num_cf_insts = 2;
1842
		break;
1843
	case CHIP_RV630:
1844
	case CHIP_RV635:
1845
		rdev->config.r600.max_pipes = 2;
1846
		rdev->config.r600.max_tile_pipes = 2;
1847
		rdev->config.r600.max_simds = 3;
1848
		rdev->config.r600.max_backends = 1;
1849
		rdev->config.r600.max_gprs = 128;
1850
		rdev->config.r600.max_threads = 192;
1851
		rdev->config.r600.max_stack_entries = 128;
1852
		rdev->config.r600.max_hw_contexts = 8;
1853
		rdev->config.r600.max_gs_threads = 4;
1854
		rdev->config.r600.sx_max_export_size = 128;
1855
		rdev->config.r600.sx_max_export_pos_size = 16;
1856
		rdev->config.r600.sx_max_export_smx_size = 128;
1857
		rdev->config.r600.sq_num_cf_insts = 2;
1858
		break;
1859
	case CHIP_RV610:
1860
	case CHIP_RV620:
1861
	case CHIP_RS780:
1862
	case CHIP_RS880:
1863
		rdev->config.r600.max_pipes = 1;
1864
		rdev->config.r600.max_tile_pipes = 1;
1865
		rdev->config.r600.max_simds = 2;
1866
		rdev->config.r600.max_backends = 1;
1867
		rdev->config.r600.max_gprs = 128;
1868
		rdev->config.r600.max_threads = 192;
1869
		rdev->config.r600.max_stack_entries = 128;
1870
		rdev->config.r600.max_hw_contexts = 4;
1871
		rdev->config.r600.max_gs_threads = 4;
1872
		rdev->config.r600.sx_max_export_size = 128;
1873
		rdev->config.r600.sx_max_export_pos_size = 16;
1874
		rdev->config.r600.sx_max_export_smx_size = 128;
1875
		rdev->config.r600.sq_num_cf_insts = 1;
1876
		break;
1877
	case CHIP_RV670:
1878
		rdev->config.r600.max_pipes = 4;
1879
		rdev->config.r600.max_tile_pipes = 4;
1880
		rdev->config.r600.max_simds = 4;
1881
		rdev->config.r600.max_backends = 4;
1882
		rdev->config.r600.max_gprs = 192;
1883
		rdev->config.r600.max_threads = 192;
1884
		rdev->config.r600.max_stack_entries = 256;
1885
		rdev->config.r600.max_hw_contexts = 8;
1886
		rdev->config.r600.max_gs_threads = 16;
1887
		rdev->config.r600.sx_max_export_size = 128;
1888
		rdev->config.r600.sx_max_export_pos_size = 16;
1889
		rdev->config.r600.sx_max_export_smx_size = 128;
1890
		rdev->config.r600.sq_num_cf_insts = 2;
1891
		break;
1892
	default:
1893
		break;
1894
	}
1895
 
1896
	/* Initialize HDP */
1897
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
1898
		WREG32((0x2c14 + j), 0x00000000);
1899
		WREG32((0x2c18 + j), 0x00000000);
1900
		WREG32((0x2c1c + j), 0x00000000);
1901
		WREG32((0x2c20 + j), 0x00000000);
1902
		WREG32((0x2c24 + j), 0x00000000);
1903
	}
1904
 
1905
	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
1906
 
1907
	/* Setup tiling */
1908
	tiling_config = 0;
1909
	ramcfg = RREG32(RAMCFG);
1910
	switch (rdev->config.r600.max_tile_pipes) {
1911
	case 1:
1912
		tiling_config |= PIPE_TILING(0);
1913
		break;
1914
	case 2:
1915
		tiling_config |= PIPE_TILING(1);
1916
		break;
1917
	case 4:
1918
		tiling_config |= PIPE_TILING(2);
1919
		break;
1920
	case 8:
1921
		tiling_config |= PIPE_TILING(3);
1922
		break;
1923
	default:
1924
		break;
1925
	}
1430 serge 1926
	rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes;
1927
	rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
1221 serge 1928
	tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
1963 serge 1929
	tiling_config |= GROUP_SIZE((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
2997 Serge 1930
 
1221 serge 1931
	tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
1932
	if (tmp > 3) {
1933
		tiling_config |= ROW_TILING(3);
1934
		tiling_config |= SAMPLE_SPLIT(3);
1935
	} else {
1936
		tiling_config |= ROW_TILING(tmp);
1937
		tiling_config |= SAMPLE_SPLIT(tmp);
1938
	}
1939
	tiling_config |= BANK_SWAPS(1);
1430 serge 1940
 
2997 Serge 1941
	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0x00ffff00;
5078 serge 1942
	tmp = rdev->config.r600.max_simds -
2997 Serge 1943
		r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R6XX_MAX_SIMDS_MASK);
5078 serge 1944
	rdev->config.r600.active_simds = tmp;
1430 serge 1945
 
2997 Serge 1946
	disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R6XX_MAX_BACKENDS_MASK;
5078 serge 1947
	tmp = 0;
1948
	for (i = 0; i < rdev->config.r600.max_backends; i++)
1949
		tmp |= (1 << i);
1950
	/* if all the backends are disabled, fix it up here */
1951
	if ((disabled_rb_mask & tmp) == tmp) {
1952
		for (i = 0; i < rdev->config.r600.max_backends; i++)
1953
			disabled_rb_mask &= ~(1 << i);
1954
	}
2997 Serge 1955
	tmp = (tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
1956
	tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.r600.max_backends,
1957
					R6XX_MAX_BACKENDS, disabled_rb_mask);
1958
	tiling_config |= tmp << 16;
1959
	rdev->config.r600.backend_map = tmp;
1960
 
1963 serge 1961
	rdev->config.r600.tile_config = tiling_config;
1221 serge 1962
	WREG32(GB_TILING_CONFIG, tiling_config);
1963
	WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
1964
	WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);
3192 Serge 1965
	WREG32(DMA_TILING_CONFIG, tiling_config & 0xffff);
1221 serge 1966
 
1430 serge 1967
	tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
1221 serge 1968
	WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
1969
	WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);
1970
 
1971
	/* Setup some CP states */
1972
	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | ROQ_IB2_START(0x2b)));
1973
	WREG32(CP_MEQ_THRESHOLDS, (MEQ_END(0x40) | ROQ_END(0x40)));
1974
 
1975
	WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | SYNC_GRADIENT |
1976
			     SYNC_WALKER | SYNC_ALIGNER));
1977
	/* Setup various GPU states */
1978
	if (rdev->family == CHIP_RV670)
1979
		WREG32(ARB_GDEC_RD_CNTL, 0x00000021);
1980
 
1981
	tmp = RREG32(SX_DEBUG_1);
1982
	tmp |= SMX_EVENT_RELEASE;
1983
	if ((rdev->family > CHIP_R600))
1984
		tmp |= ENABLE_NEW_SMX_ADDRESS;
1985
	WREG32(SX_DEBUG_1, tmp);
1986
 
1987
	if (((rdev->family) == CHIP_R600) ||
1988
	    ((rdev->family) == CHIP_RV630) ||
1989
	    ((rdev->family) == CHIP_RV610) ||
1990
	    ((rdev->family) == CHIP_RV620) ||
1268 serge 1991
	    ((rdev->family) == CHIP_RS780) ||
1992
	    ((rdev->family) == CHIP_RS880)) {
1221 serge 1993
		WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE);
1994
	} else {
1995
		WREG32(DB_DEBUG, 0);
1996
	}
1997
	WREG32(DB_WATERMARKS, (DEPTH_FREE(4) | DEPTH_CACHELINE_FREE(16) |
1998
			       DEPTH_FLUSH(16) | DEPTH_PENDING_FREE(4)));
1999
 
2000
	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
2001
	WREG32(VGT_NUM_INSTANCES, 0);
2002
 
2003
	WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
2004
	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(0));
2005
 
2006
	tmp = RREG32(SQ_MS_FIFO_SIZES);
2007
	if (((rdev->family) == CHIP_RV610) ||
2008
	    ((rdev->family) == CHIP_RV620) ||
1268 serge 2009
	    ((rdev->family) == CHIP_RS780) ||
2010
	    ((rdev->family) == CHIP_RS880)) {
1221 serge 2011
		tmp = (CACHE_FIFO_SIZE(0xa) |
2012
		       FETCH_FIFO_HIWATER(0xa) |
2013
		       DONE_FIFO_HIWATER(0xe0) |
2014
		       ALU_UPDATE_FIFO_HIWATER(0x8));
2015
	} else if (((rdev->family) == CHIP_R600) ||
2016
		   ((rdev->family) == CHIP_RV630)) {
2017
		tmp &= ~DONE_FIFO_HIWATER(0xff);
2018
		tmp |= DONE_FIFO_HIWATER(0x4);
2019
	}
2020
	WREG32(SQ_MS_FIFO_SIZES, tmp);
2021
 
2022
	/* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
2023
	 * should be adjusted as needed by the 2D/3D drivers.  This just sets default values
2024
	 */
2025
	sq_config = RREG32(SQ_CONFIG);
2026
	sq_config &= ~(PS_PRIO(3) |
2027
		       VS_PRIO(3) |
2028
		       GS_PRIO(3) |
2029
		       ES_PRIO(3));
2030
	sq_config |= (DX9_CONSTS |
2031
		      VC_ENABLE |
2032
		      PS_PRIO(0) |
2033
		      VS_PRIO(1) |
2034
		      GS_PRIO(2) |
2035
		      ES_PRIO(3));
2036
 
2037
	if ((rdev->family) == CHIP_R600) {
2038
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(124) |
2039
					  NUM_VS_GPRS(124) |
2040
					  NUM_CLAUSE_TEMP_GPRS(4));
2041
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(0) |
2042
					  NUM_ES_GPRS(0));
2043
		sq_thread_resource_mgmt = (NUM_PS_THREADS(136) |
2044
					   NUM_VS_THREADS(48) |
2045
					   NUM_GS_THREADS(4) |
2046
					   NUM_ES_THREADS(4));
2047
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(128) |
2048
					    NUM_VS_STACK_ENTRIES(128));
2049
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(0) |
2050
					    NUM_ES_STACK_ENTRIES(0));
2051
	} else if (((rdev->family) == CHIP_RV610) ||
2052
		   ((rdev->family) == CHIP_RV620) ||
1268 serge 2053
		   ((rdev->family) == CHIP_RS780) ||
2054
		   ((rdev->family) == CHIP_RS880)) {
1221 serge 2055
		/* no vertex cache */
2056
		sq_config &= ~VC_ENABLE;
2057
 
2058
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
2059
					  NUM_VS_GPRS(44) |
2060
					  NUM_CLAUSE_TEMP_GPRS(2));
2061
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
2062
					  NUM_ES_GPRS(17));
2063
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
2064
					   NUM_VS_THREADS(78) |
2065
					   NUM_GS_THREADS(4) |
2066
					   NUM_ES_THREADS(31));
2067
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
2068
					    NUM_VS_STACK_ENTRIES(40));
2069
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
2070
					    NUM_ES_STACK_ENTRIES(16));
2071
	} else if (((rdev->family) == CHIP_RV630) ||
2072
		   ((rdev->family) == CHIP_RV635)) {
2073
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
2074
					  NUM_VS_GPRS(44) |
2075
					  NUM_CLAUSE_TEMP_GPRS(2));
2076
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(18) |
2077
					  NUM_ES_GPRS(18));
2078
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
2079
					   NUM_VS_THREADS(78) |
2080
					   NUM_GS_THREADS(4) |
2081
					   NUM_ES_THREADS(31));
2082
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
2083
					    NUM_VS_STACK_ENTRIES(40));
2084
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
2085
					    NUM_ES_STACK_ENTRIES(16));
2086
	} else if ((rdev->family) == CHIP_RV670) {
2087
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
2088
					  NUM_VS_GPRS(44) |
2089
					  NUM_CLAUSE_TEMP_GPRS(2));
2090
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
2091
					  NUM_ES_GPRS(17));
2092
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
2093
					   NUM_VS_THREADS(78) |
2094
					   NUM_GS_THREADS(4) |
2095
					   NUM_ES_THREADS(31));
2096
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(64) |
2097
					    NUM_VS_STACK_ENTRIES(64));
2098
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(64) |
2099
					    NUM_ES_STACK_ENTRIES(64));
2100
	}
2101
 
2102
	WREG32(SQ_CONFIG, sq_config);
2103
	WREG32(SQ_GPR_RESOURCE_MGMT_1,  sq_gpr_resource_mgmt_1);
2104
	WREG32(SQ_GPR_RESOURCE_MGMT_2,  sq_gpr_resource_mgmt_2);
2105
	WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
2106
	WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
2107
	WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);
2108
 
2109
	if (((rdev->family) == CHIP_RV610) ||
2110
	    ((rdev->family) == CHIP_RV620) ||
1268 serge 2111
	    ((rdev->family) == CHIP_RS780) ||
2112
	    ((rdev->family) == CHIP_RS880)) {
1221 serge 2113
		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY));
2114
	} else {
2115
		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC));
2116
	}
2117
 
2118
	/* More default values. 2D/3D driver should adjust as needed */
2119
	WREG32(PA_SC_AA_SAMPLE_LOCS_2S, (S0_X(0xc) | S0_Y(0x4) |
2120
					 S1_X(0x4) | S1_Y(0xc)));
2121
	WREG32(PA_SC_AA_SAMPLE_LOCS_4S, (S0_X(0xe) | S0_Y(0xe) |
2122
					 S1_X(0x2) | S1_Y(0x2) |
2123
					 S2_X(0xa) | S2_Y(0x6) |
2124
					 S3_X(0x6) | S3_Y(0xa)));
2125
	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD0, (S0_X(0xe) | S0_Y(0xb) |
2126
					     S1_X(0x4) | S1_Y(0xc) |
2127
					     S2_X(0x1) | S2_Y(0x6) |
2128
					     S3_X(0xa) | S3_Y(0xe)));
2129
	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD1, (S4_X(0x6) | S4_Y(0x1) |
2130
					     S5_X(0x0) | S5_Y(0x0) |
2131
					     S6_X(0xb) | S6_Y(0x4) |
2132
					     S7_X(0x7) | S7_Y(0x8)));
2133
 
2134
	WREG32(VGT_STRMOUT_EN, 0);
2135
	tmp = rdev->config.r600.max_pipes * 16;
2136
	switch (rdev->family) {
2137
	case CHIP_RV610:
1268 serge 2138
	case CHIP_RV620:
1221 serge 2139
	case CHIP_RS780:
1268 serge 2140
	case CHIP_RS880:
1221 serge 2141
		tmp += 32;
2142
		break;
2143
	case CHIP_RV670:
2144
		tmp += 128;
2145
		break;
2146
	default:
2147
		break;
2148
	}
2149
	if (tmp > 256) {
2150
		tmp = 256;
2151
	}
2152
	WREG32(VGT_ES_PER_GS, 128);
2153
	WREG32(VGT_GS_PER_ES, tmp);
2154
	WREG32(VGT_GS_PER_VS, 2);
2155
	WREG32(VGT_GS_VERTEX_REUSE, 16);
2156
 
2157
	/* more default values. 2D/3D driver should adjust as needed */
2158
	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
2159
	WREG32(VGT_STRMOUT_EN, 0);
2160
	WREG32(SX_MISC, 0);
2161
	WREG32(PA_SC_MODE_CNTL, 0);
2162
	WREG32(PA_SC_AA_CONFIG, 0);
2163
	WREG32(PA_SC_LINE_STIPPLE, 0);
2164
	WREG32(SPI_INPUT_Z, 0);
2165
	WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
2166
	WREG32(CB_COLOR7_FRAG, 0);
2167
 
2168
	/* Clear render buffer base addresses */
2169
	WREG32(CB_COLOR0_BASE, 0);
2170
	WREG32(CB_COLOR1_BASE, 0);
2171
	WREG32(CB_COLOR2_BASE, 0);
2172
	WREG32(CB_COLOR3_BASE, 0);
2173
	WREG32(CB_COLOR4_BASE, 0);
2174
	WREG32(CB_COLOR5_BASE, 0);
2175
	WREG32(CB_COLOR6_BASE, 0);
2176
	WREG32(CB_COLOR7_BASE, 0);
2177
	WREG32(CB_COLOR7_FRAG, 0);
2178
 
2179
	switch (rdev->family) {
2180
	case CHIP_RV610:
1268 serge 2181
	case CHIP_RV620:
1221 serge 2182
	case CHIP_RS780:
1268 serge 2183
	case CHIP_RS880:
1221 serge 2184
		tmp = TC_L2_SIZE(8);
2185
		break;
2186
	case CHIP_RV630:
2187
	case CHIP_RV635:
2188
		tmp = TC_L2_SIZE(4);
2189
		break;
2190
	case CHIP_R600:
2191
		tmp = TC_L2_SIZE(0) | L2_DISABLE_LATE_HIT;
2192
		break;
2193
	default:
2194
		tmp = TC_L2_SIZE(0);
2195
		break;
2196
	}
2197
	WREG32(TC_CNTL, tmp);
2198
 
2199
	tmp = RREG32(HDP_HOST_PATH_CNTL);
2200
	WREG32(HDP_HOST_PATH_CNTL, tmp);
2201
 
2202
	tmp = RREG32(ARB_POP);
2203
	tmp |= ENABLE_TC128;
2204
	WREG32(ARB_POP, tmp);
2205
 
2206
	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
2207
	WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
2208
			       NUM_CLIP_SEQ(3)));
2209
	WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095));
2997 Serge 2210
	WREG32(VC_ENHANCE, 0);
1221 serge 2211
}
2212
 
2213
 
1128 serge 2214
/*
2215
 * Indirect register accessors
2216
 */
1221 serge 2217
u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
1128 serge 2218
{
5078 serge 2219
	unsigned long flags;
1221 serge 2220
	u32 r;
1128 serge 2221
 
5078 serge 2222
	spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
1221 serge 2223
	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
2224
	(void)RREG32(PCIE_PORT_INDEX);
2225
	r = RREG32(PCIE_PORT_DATA);
5078 serge 2226
	spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
1128 serge 2227
	return r;
2228
}
2229
 
1221 serge 2230
void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
1128 serge 2231
{
5078 serge 2232
	unsigned long flags;
2233
 
2234
	spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
1221 serge 2235
	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
2236
	(void)RREG32(PCIE_PORT_INDEX);
2237
	WREG32(PCIE_PORT_DATA, (v));
2238
	(void)RREG32(PCIE_PORT_DATA);
5078 serge 2239
	spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
1128 serge 2240
}
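
/* Illustrative sketch: the index/data pair above is the usual pattern for
 * banked register files.  A hypothetical read-modify-write helper built on
 * the two accessors (the mask and val arguments are assumptions):
 */
static void r600_pciep_rmw_example(struct radeon_device *rdev,
				   u32 reg, u32 mask, u32 val)
{
	u32 tmp = r600_pciep_rreg(rdev, reg);

	tmp = (tmp & ~mask) | (val & mask);
	r600_pciep_wreg(rdev, reg, tmp);
}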
1221 serge 2241
 
2242
/*
2243
 * CP & Ring
2244
 */
2245
void r600_cp_stop(struct radeon_device *rdev)
2246
{
5078 serge 2247
	if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
3192 Serge 2248
		radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
1221 serge 2249
	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
1963 serge 2250
	WREG32(SCRATCH_UMSK, 0);
3192 Serge 2251
	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
1221 serge 2252
}
1413 serge 2253
 
2254
int r600_init_microcode(struct radeon_device *rdev)
2255
{
2256
	const char *chip_name;
2257
	const char *rlc_chip_name;
5078 serge 2258
	const char *smc_chip_name = "RV770";
2259
	size_t pfp_req_size, me_req_size, rlc_req_size, smc_req_size = 0;
1413 serge 2260
	char fw_name[30];
2261
	int err;
2262
 
2263
	DRM_DEBUG("\n");
2264
 
2265
	switch (rdev->family) {
2266
	case CHIP_R600:
2267
		chip_name = "R600";
2268
		rlc_chip_name = "R600";
2269
		break;
2270
	case CHIP_RV610:
2271
		chip_name = "RV610";
2272
		rlc_chip_name = "R600";
2273
		break;
2274
	case CHIP_RV630:
2275
		chip_name = "RV630";
2276
		rlc_chip_name = "R600";
2277
		break;
2278
	case CHIP_RV620:
2279
		chip_name = "RV620";
2280
		rlc_chip_name = "R600";
2281
		break;
2282
	case CHIP_RV635:
2283
		chip_name = "RV635";
2284
		rlc_chip_name = "R600";
2285
		break;
2286
	case CHIP_RV670:
2287
		chip_name = "RV670";
2288
		rlc_chip_name = "R600";
2289
		break;
2290
	case CHIP_RS780:
2291
	case CHIP_RS880:
2292
		chip_name = "RS780";
2293
		rlc_chip_name = "R600";
2294
		break;
2295
	case CHIP_RV770:
2296
		chip_name = "RV770";
2297
		rlc_chip_name = "R700";
5078 serge 2298
		smc_chip_name = "RV770";
2299
		smc_req_size = ALIGN(RV770_SMC_UCODE_SIZE, 4);
1413 serge 2300
		break;
2301
	case CHIP_RV730:
2302
		chip_name = "RV730";
2303
		rlc_chip_name = "R700";
5078 serge 2304
		smc_chip_name = "RV730";
2305
		smc_req_size = ALIGN(RV730_SMC_UCODE_SIZE, 4);
1413 serge 2306
		break;
2307
	case CHIP_RV710:
2308
		chip_name = "RV710";
2309
		rlc_chip_name = "R700";
5078 serge 2310
		smc_chip_name = "RV710";
2311
		smc_req_size = ALIGN(RV710_SMC_UCODE_SIZE, 4);
1413 serge 2312
		break;
5078 serge 2313
	case CHIP_RV740:
2314
		chip_name = "RV730";
2315
		rlc_chip_name = "R700";
2316
		smc_chip_name = "RV740";
2317
		smc_req_size = ALIGN(RV740_SMC_UCODE_SIZE, 4);
2318
		break;
1963 serge 2319
	case CHIP_CEDAR:
2320
		chip_name = "CEDAR";
2321
		rlc_chip_name = "CEDAR";
5078 serge 2322
		smc_chip_name = "CEDAR";
2323
		smc_req_size = ALIGN(CEDAR_SMC_UCODE_SIZE, 4);
1963 serge 2324
		break;
2325
	case CHIP_REDWOOD:
2326
		chip_name = "REDWOOD";
2327
		rlc_chip_name = "REDWOOD";
5078 serge 2328
		smc_chip_name = "REDWOOD";
2329
		smc_req_size = ALIGN(REDWOOD_SMC_UCODE_SIZE, 4);
1963 serge 2330
		break;
2331
	case CHIP_JUNIPER:
2332
		chip_name = "JUNIPER";
2333
		rlc_chip_name = "JUNIPER";
5078 serge 2334
		smc_chip_name = "JUNIPER";
2335
		smc_req_size = ALIGN(JUNIPER_SMC_UCODE_SIZE, 4);
1963 serge 2336
		break;
2337
	case CHIP_CYPRESS:
2338
	case CHIP_HEMLOCK:
2339
		chip_name = "CYPRESS";
2340
		rlc_chip_name = "CYPRESS";
5078 serge 2341
		smc_chip_name = "CYPRESS";
2342
		smc_req_size = ALIGN(CYPRESS_SMC_UCODE_SIZE, 4);
1963 serge 2343
		break;
2344
	case CHIP_PALM:
2345
		chip_name = "PALM";
2346
		rlc_chip_name = "SUMO";
2347
		break;
1986 serge 2348
	case CHIP_SUMO:
2349
		chip_name = "SUMO";
2350
		rlc_chip_name = "SUMO";
2351
		break;
2352
	case CHIP_SUMO2:
2353
		chip_name = "SUMO2";
2354
		rlc_chip_name = "SUMO";
2355
		break;
1413 serge 2356
	default: BUG();
2357
	}
2358
 
1963 serge 2359
	if (rdev->family >= CHIP_CEDAR) {
2360
		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
2361
		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
2362
		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
2363
	} else if (rdev->family >= CHIP_RV770) {
1413 serge 2364
		pfp_req_size = R700_PFP_UCODE_SIZE * 4;
2365
		me_req_size = R700_PM4_UCODE_SIZE * 4;
2366
		rlc_req_size = R700_RLC_UCODE_SIZE * 4;
2367
	} else {
5078 serge 2368
		pfp_req_size = R600_PFP_UCODE_SIZE * 4;
2369
		me_req_size = R600_PM4_UCODE_SIZE * 12;
2370
		rlc_req_size = R600_RLC_UCODE_SIZE * 4;
1413 serge 2371
	}
2372
 
2373
	DRM_INFO("Loading %s Microcode\n", chip_name);
2374
 
2375
	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
5078 serge 2376
	err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
1413 serge 2377
	if (err)
2378
		goto out;
2379
	if (rdev->pfp_fw->size != pfp_req_size) {
2380
		printk(KERN_ERR
2381
		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
2382
		       rdev->pfp_fw->size, fw_name);
2383
		err = -EINVAL;
2384
		goto out;
2385
	}
2386
 
2387
	snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
5078 serge 2388
	err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
1413 serge 2389
	if (err)
2390
		goto out;
2391
	if (rdev->me_fw->size != me_req_size) {
2392
		printk(KERN_ERR
2393
		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
2394
		       rdev->me_fw->size, fw_name);
2395
		err = -EINVAL;
2396
	}
2397
 
2398
	snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
5078 serge 2399
	err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
1413 serge 2400
	if (err)
2401
		goto out;
2402
	if (rdev->rlc_fw->size != rlc_req_size) {
2403
		printk(KERN_ERR
2404
		       "r600_rlc: Bogus length %zu in firmware \"%s\"\n",
2405
		       rdev->rlc_fw->size, fw_name);
2406
		err = -EINVAL;
2407
	}
2408
 
5078 serge 2409
	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_HEMLOCK)) {
2410
		snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", smc_chip_name);
2411
		err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
2412
		if (err) {
2413
			printk(KERN_ERR
2414
			       "smc: error loading firmware \"%s\"\n",
2415
			       fw_name);
2416
			release_firmware(rdev->smc_fw);
2417
			rdev->smc_fw = NULL;
2418
			err = 0;
2419
		} else if (rdev->smc_fw->size != smc_req_size) {
2420
			printk(KERN_ERR
2421
			       "smc: Bogus length %zu in firmware \"%s\"\n",
2422
			       rdev->smc_fw->size, fw_name);
2423
			err = -EINVAL;
2424
		}
2425
	}
2426
 
1413 serge 2427
out:
2428
	if (err) {
2429
		if (err != -EINVAL)
2430
			printk(KERN_ERR
2431
			       "r600_cp: Failed to load firmware \"%s\"\n",
2432
			       fw_name);
2433
		release_firmware(rdev->pfp_fw);
2434
		rdev->pfp_fw = NULL;
2435
		release_firmware(rdev->me_fw);
2436
		rdev->me_fw = NULL;
2437
		release_firmware(rdev->rlc_fw);
2438
		rdev->rlc_fw = NULL;
5078 serge 2439
		release_firmware(rdev->smc_fw);
2440
		rdev->smc_fw = NULL;
1413 serge 2441
	}
2442
	return err;
2443
}
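
/* Illustrative note: for CHIP_RV770, for example, the function above ends
 * up requesting radeon/RV770_pfp.bin, radeon/RV770_me.bin,
 * radeon/R700_rlc.bin and radeon/RV770_smc.bin.
 */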
2444
 
5078 serge 2445
u32 r600_gfx_get_rptr(struct radeon_device *rdev,
2446
		      struct radeon_ring *ring)
2447
{
2448
	u32 rptr;
2449
 
2450
	if (rdev->wb.enabled)
2451
		rptr = rdev->wb.wb[ring->rptr_offs/4];
2452
	else
2453
		rptr = RREG32(R600_CP_RB_RPTR);
2454
 
2455
	return rptr;
2456
}
2457
 
2458
u32 r600_gfx_get_wptr(struct radeon_device *rdev,
2459
		      struct radeon_ring *ring)
2460
{
2461
	u32 wptr;
2462
 
2463
	wptr = RREG32(R600_CP_RB_WPTR);
2464
 
2465
	return wptr;
2466
}
2467
 
2468
void r600_gfx_set_wptr(struct radeon_device *rdev,
2469
		       struct radeon_ring *ring)
2470
{
2471
	WREG32(R600_CP_RB_WPTR, ring->wptr);
2472
	(void)RREG32(R600_CP_RB_WPTR);
2473
}
2474
 
1413 serge 2475
static int r600_cp_load_microcode(struct radeon_device *rdev)
2476
{
2477
	const __be32 *fw_data;
2478
	int i;
2479
 
2480
	if (!rdev->me_fw || !rdev->pfp_fw)
2481
		return -EINVAL;
2482
 
2483
	r600_cp_stop(rdev);
2484
 
1963 serge 2485
	WREG32(CP_RB_CNTL,
2486
#ifdef __BIG_ENDIAN
2487
	       BUF_SWAP_32BIT |
2488
#endif
2489
	       RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
1413 serge 2490
 
2491
	/* Reset cp */
2492
	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
2493
	RREG32(GRBM_SOFT_RESET);
2494
	mdelay(15);
2495
	WREG32(GRBM_SOFT_RESET, 0);
2496
 
2497
	WREG32(CP_ME_RAM_WADDR, 0);
2498
 
2499
	fw_data = (const __be32 *)rdev->me_fw->data;
2500
	WREG32(CP_ME_RAM_WADDR, 0);
5078 serge 2501
	for (i = 0; i < R600_PM4_UCODE_SIZE * 3; i++)
1413 serge 2502
		WREG32(CP_ME_RAM_DATA,
2503
		       be32_to_cpup(fw_data++));
2504
 
2505
	fw_data = (const __be32 *)rdev->pfp_fw->data;
2506
	WREG32(CP_PFP_UCODE_ADDR, 0);
5078 serge 2507
	for (i = 0; i < R600_PFP_UCODE_SIZE; i++)
1413 serge 2508
		WREG32(CP_PFP_UCODE_DATA,
2509
		       be32_to_cpup(fw_data++));
2510
 
2511
	WREG32(CP_PFP_UCODE_ADDR, 0);
2512
	WREG32(CP_ME_RAM_WADDR, 0);
2513
	WREG32(CP_ME_RAM_RADDR, 0);
2514
	return 0;
2515
}
2516
 
1221 serge 2517
int r600_cp_start(struct radeon_device *rdev)
2518
{
2997 Serge 2519
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
1221 serge 2520
	int r;
2521
	uint32_t cp_me;
2522
 
2997 Serge 2523
	r = radeon_ring_lock(rdev, ring, 7);
1221 serge 2524
	if (r) {
2525
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
2526
		return r;
2527
	}
2997 Serge 2528
	radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
2529
	radeon_ring_write(ring, 0x1);
1963 serge 2530
	if (rdev->family >= CHIP_RV770) {
2997 Serge 2531
		radeon_ring_write(ring, 0x0);
2532
		radeon_ring_write(ring, rdev->config.rv770.max_hw_contexts - 1);
1963 serge 2533
	} else {
2997 Serge 2534
		radeon_ring_write(ring, 0x3);
2535
		radeon_ring_write(ring, rdev->config.r600.max_hw_contexts - 1);
1221 serge 2536
	}
2997 Serge 2537
	radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
2538
	radeon_ring_write(ring, 0);
2539
	radeon_ring_write(ring, 0);
5078 serge 2540
	radeon_ring_unlock_commit(rdev, ring, false);
1221 serge 2541
 
2542
	cp_me = 0xff;
2543
	WREG32(R_0086D8_CP_ME_CNTL, cp_me);
2544
	return 0;
2545
}
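
/* Illustrative note: as used throughout this file, PACKET3(op, n) encodes
 * n + 1 payload dwords, so the ME_INITIALIZE packet above is 1 header +
 * 6 payload dwords - exactly the 7 dwords reserved by radeon_ring_lock().
 */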
1413 serge 2546
 
2547
int r600_cp_resume(struct radeon_device *rdev)
2548
{
2997 Serge 2549
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
1413 serge 2550
	u32 tmp;
2551
	u32 rb_bufsz;
2552
	int r;
2553
 
2554
	/* Reset cp */
2555
	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
2556
	RREG32(GRBM_SOFT_RESET);
2557
	mdelay(15);
2558
	WREG32(GRBM_SOFT_RESET, 0);
2559
 
2560
	/* Set ring buffer size */
5078 serge 2561
	rb_bufsz = order_base_2(ring->ring_size / 8);
2562
	tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
1413 serge 2563
#ifdef __BIG_ENDIAN
2564
	tmp |= BUF_SWAP_32BIT;
2565
#endif
2566
	WREG32(CP_RB_CNTL, tmp);
2997 Serge 2567
	WREG32(CP_SEM_WAIT_TIMER, 0x0);
1413 serge 2568
 
2569
	/* Set the write pointer delay */
2570
	WREG32(CP_RB_WPTR_DELAY, 0);
2571
 
2572
	/* Initialize the ring buffer's read and write pointers */
2573
	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
2574
	WREG32(CP_RB_RPTR_WR, 0);
2997 Serge 2575
	ring->wptr = 0;
2576
	WREG32(CP_RB_WPTR, ring->wptr);
1963 serge 2577
 
2578
	/* set the wb address whether it's enabled or not */
2579
	WREG32(CP_RB_RPTR_ADDR,
2580
	       ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
2581
	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
2582
	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
2583
 
2584
	if (rdev->wb.enabled)
2585
		WREG32(SCRATCH_UMSK, 0xff);
2586
	else {
2587
		tmp |= RB_NO_UPDATE;
2588
		WREG32(SCRATCH_UMSK, 0);
2589
	}
2590
 
1413 serge 2591
	mdelay(1);
2592
	WREG32(CP_RB_CNTL, tmp);
2593
 
2997 Serge 2594
	WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
1413 serge 2595
	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
2596
 
2597
	r600_cp_start(rdev);
2997 Serge 2598
	ring->ready = true;
2599
	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
1413 serge 2600
	if (r) {
2997 Serge 2601
		ring->ready = false;
1413 serge 2602
		return r;
2603
	}
5078 serge 2604
 
2605
	if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
2606
		radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
2607
 
1413 serge 2608
	return 0;
2609
}
2610
 
2997 Serge 2611
void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size)
1221 serge 2612
{
1233 serge 2613
	u32 rb_bufsz;
2997 Serge 2614
	int r;
1221 serge 2615
 
1233 serge 2616
	/* Align ring size */
5078 serge 2617
	rb_bufsz = order_base_2(ring_size / 8);
1233 serge 2618
	ring_size = (1 << (rb_bufsz + 1)) * 4;
2997 Serge 2619
	ring->ring_size = ring_size;
2620
	ring->align_mask = 16 - 1;
2621
 
2622
	if (radeon_ring_supports_scratch_reg(rdev, ring)) {
2623
		r = radeon_scratch_get(rdev, &ring->rptr_save_reg);
2624
		if (r) {
2625
			DRM_ERROR("failed to get scratch reg for rptr save (%d).\n", r);
2626
			ring->rptr_save_reg = 0;
2627
		}
2628
	}
1233 serge 2629
}
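
/* Worked example (illustrative): r600_init() below requests 1024 * 1024
 * bytes; ring_size / 8 = 131072, order_base_2(131072) = 17, and
 * (1 << 18) * 4 = 1048576, so a power-of-two request comes back unchanged.
 */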
2630
 
1963 serge 2631
void r600_cp_fini(struct radeon_device *rdev)
2632
{
2997 Serge 2633
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
1963 serge 2634
	r600_cp_stop(rdev);
2997 Serge 2635
	radeon_ring_fini(rdev, ring);
2636
	radeon_scratch_free(rdev, ring->rptr_save_reg);
1963 serge 2637
}
1233 serge 2638
 
3192 Serge 2639
/*
1233 serge 2640
 * GPU scratch register helper functions.
2641
 */
2642
void r600_scratch_init(struct radeon_device *rdev)
2643
{
2644
	int i;
2645
 
2646
	rdev->scratch.num_reg = 7;
1963 serge 2647
	rdev->scratch.reg_base = SCRATCH_REG0;
1233 serge 2648
	for (i = 0; i < rdev->scratch.num_reg; i++) {
2649
		rdev->scratch.free[i] = true;
1963 serge 2650
		rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
1233 serge 2651
	}
2652
}
1413 serge 2653
 
2997 Serge 2654
int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
1413 serge 2655
{
2656
	uint32_t scratch;
2657
	uint32_t tmp = 0;
2658
	unsigned i;
2659
	int r;
2660
 
2661
	r = radeon_scratch_get(rdev, &scratch);
2662
	if (r) {
2663
		DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
2664
		return r;
2665
	}
2666
	WREG32(scratch, 0xCAFEDEAD);
2997 Serge 2667
	r = radeon_ring_lock(rdev, ring, 3);
1413 serge 2668
	if (r) {
2997 Serge 2669
		DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ring->idx, r);
1413 serge 2670
		radeon_scratch_free(rdev, scratch);
2671
		return r;
2672
	}
2997 Serge 2673
	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2674
	radeon_ring_write(ring, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
2675
	radeon_ring_write(ring, 0xDEADBEEF);
5078 serge 2676
	radeon_ring_unlock_commit(rdev, ring, false);
1413 serge 2677
	for (i = 0; i < rdev->usec_timeout; i++) {
2678
		tmp = RREG32(scratch);
2679
		if (tmp == 0xDEADBEEF)
2680
			break;
2681
		DRM_UDELAY(1);
2682
	}
2683
	if (i < rdev->usec_timeout) {
2997 Serge 2684
		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
1413 serge 2685
	} else {
2997 Serge 2686
		DRM_ERROR("radeon: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
2687
			  ring->idx, scratch, tmp);
1413 serge 2688
		r = -EINVAL;
2689
	}
2690
	radeon_scratch_free(rdev, scratch);
2691
	return r;
2692
}
1963 serge 2693
 
3192 Serge 2694
/*
2695
 * CP fences/semaphores
2696
 */
2697
 
1413 serge 2698
void r600_fence_ring_emit(struct radeon_device *rdev,
2699
			  struct radeon_fence *fence)
2700
{
2997 Serge 2701
	struct radeon_ring *ring = &rdev->ring[fence->ring];
5078 serge 2702
	u32 cp_coher_cntl = PACKET3_TC_ACTION_ENA | PACKET3_VC_ACTION_ENA |
2703
		PACKET3_SH_ACTION_ENA;
2997 Serge 2704
 
5078 serge 2705
	if (rdev->family >= CHIP_RV770)
2706
		cp_coher_cntl |= PACKET3_FULL_CACHE_ENA;
2707
 
1963 serge 2708
	if (rdev->wb.use_event) {
2997 Serge 2709
		u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
2710
		/* flush read cache over gart */
2711
		radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
5078 serge 2712
		radeon_ring_write(ring, cp_coher_cntl);
2997 Serge 2713
		radeon_ring_write(ring, 0xFFFFFFFF);
2714
		radeon_ring_write(ring, 0);
2715
		radeon_ring_write(ring, 10); /* poll interval */
1963 serge 2716
		/* EVENT_WRITE_EOP - flush caches, send int */
2997 Serge 2717
		radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
2718
		radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
5078 serge 2719
		radeon_ring_write(ring, lower_32_bits(addr));
2997 Serge 2720
		radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
2721
		radeon_ring_write(ring, fence->seq);
2722
		radeon_ring_write(ring, 0);
1963 serge 2723
	} else {
2997 Serge 2724
		/* flush read cache over gart */
2725
		radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
5078 serge 2726
		radeon_ring_write(ring, cp_coher_cntl);
2997 Serge 2727
		radeon_ring_write(ring, 0xFFFFFFFF);
2728
		radeon_ring_write(ring, 0);
2729
		radeon_ring_write(ring, 10); /* poll interval */
2730
		radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
2731
		radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0));
1430 serge 2732
		/* wait for 3D idle clean */
2997 Serge 2733
		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2734
		radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
2735
		radeon_ring_write(ring, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
1413 serge 2736
		/* Emit fence sequence & fire IRQ */
2997 Serge 2737
		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2738
		radeon_ring_write(ring, ((rdev->fence_drv[fence->ring].scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
2739
		radeon_ring_write(ring, fence->seq);
1413 serge 2740
		/* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
2997 Serge 2741
		radeon_ring_write(ring, PACKET0(CP_INT_STATUS, 0));
2742
		radeon_ring_write(ring, RB_INT_STAT);
1963 serge 2743
	}
1413 serge 2744
}
1963 serge 2745
 
5078 serge 2746
/**
2747
 * r600_semaphore_ring_emit - emit a semaphore on the CP ring
2748
 *
2749
 * @rdev: radeon_device pointer
2750
 * @ring: radeon ring buffer object
2751
 * @semaphore: radeon semaphore object
2752
 * @emit_wait: Is this a semaphore wait?
2753
 *
2754
 * Emits a semaphore signal/wait packet to the CP ring and prevents the PFP
2755
 * from running ahead of semaphore waits.
2756
 */
2757
bool r600_semaphore_ring_emit(struct radeon_device *rdev,
2997 Serge 2758
			      struct radeon_ring *ring,
2759
			      struct radeon_semaphore *semaphore,
2760
			      bool emit_wait)
2761
{
2762
	uint64_t addr = semaphore->gpu_addr;
2763
	unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
2764
 
2765
	if (rdev->family < CHIP_CAYMAN)
2766
		sel |= PACKET3_SEM_WAIT_ON_SIGNAL;
2767
 
2768
	radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
5078 serge 2769
	radeon_ring_write(ring, lower_32_bits(addr));
2997 Serge 2770
	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
2771
 
5128 serge 2772
	/* PFP_SYNC_ME packet only exists on 7xx+, only enable it on eg+ */
2773
	if (emit_wait && (rdev->family >= CHIP_CEDAR)) {
5078 serge 2774
		/* Prevent the PFP from running ahead of the semaphore wait */
2775
		radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
2776
		radeon_ring_write(ring, 0x0);
2777
	}
3192 Serge 2778
 
5078 serge 2779
	return true;
3192 Serge 2780
}
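
/* Illustrative pairing (ring_a, ring_b and sem are hypothetical): emit the
 * semaphore as a signal on the producing ring and as a wait on the
 * consuming ring, with the same semaphore object on both sides:
 *
 *	r600_semaphore_ring_emit(rdev, ring_a, sem, false);
 *	r600_semaphore_ring_emit(rdev, ring_b, sem, true);
 */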
2781
 
2782
/**
5078 serge 2783
 * r600_copy_cpdma - copy pages using the CP DMA engine
3192 Serge 2784
 *
2785
 * @rdev: radeon_device pointer
2786
 * @src_offset: src GPU address
2787
 * @dst_offset: dst GPU address
2788
 * @num_gpu_pages: number of GPU pages to xfer
2789
 * @fence: radeon fence object
2790
 *
5078 serge 2791
 * Copy GPU paging using the CP DMA engine (r6xx+).
3192 Serge 2792
 * Used by the radeon ttm implementation to move pages if
2793
 * registered as the asic copy callback.
2794
 */
5078 serge 2795
int r600_copy_cpdma(struct radeon_device *rdev,
3192 Serge 2796
		  uint64_t src_offset, uint64_t dst_offset,
2797
		  unsigned num_gpu_pages,
2798
		  struct radeon_fence **fence)
2799
{
2800
	struct radeon_semaphore *sem = NULL;
5078 serge 2801
	int ring_index = rdev->asic->copy.blit_ring_index;
3192 Serge 2802
	struct radeon_ring *ring = &rdev->ring[ring_index];
5078 serge 2803
	u32 size_in_bytes, cur_size_in_bytes, tmp;
3192 Serge 2804
	int i, num_loops;
2805
	int r = 0;
2806
 
2807
	r = radeon_semaphore_create(rdev, &sem);
2808
	if (r) {
2809
		DRM_ERROR("radeon: moving bo (%d).\n", r);
2810
		return r;
2811
	}
2812
 
5078 serge 2813
	size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
2814
	num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
2815
	r = radeon_ring_lock(rdev, ring, num_loops * 6 + 24);
3192 Serge 2816
	if (r) {
2817
		DRM_ERROR("radeon: moving bo (%d).\n", r);
2818
		radeon_semaphore_free(rdev, &sem, NULL);
2819
		return r;
2820
	}
2821
 
5078 serge 2822
	radeon_semaphore_sync_to(sem, *fence);
2823
	radeon_semaphore_sync_rings(rdev, sem, ring->idx);
3192 Serge 2824
 
5078 serge 2825
	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2826
	radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
2827
	radeon_ring_write(ring, WAIT_3D_IDLE_bit);
3192 Serge 2828
	for (i = 0; i < num_loops; i++) {
5078 serge 2829
		cur_size_in_bytes = size_in_bytes;
2830
		if (cur_size_in_bytes > 0x1fffff)
2831
			cur_size_in_bytes = 0x1fffff;
2832
		size_in_bytes -= cur_size_in_bytes;
2833
		tmp = upper_32_bits(src_offset) & 0xff;
2834
		if (size_in_bytes == 0)
2835
			tmp |= PACKET3_CP_DMA_CP_SYNC;
2836
		radeon_ring_write(ring, PACKET3(PACKET3_CP_DMA, 4));
2837
		radeon_ring_write(ring, lower_32_bits(src_offset));
2838
		radeon_ring_write(ring, tmp);
2839
		radeon_ring_write(ring, lower_32_bits(dst_offset));
2840
		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
2841
		radeon_ring_write(ring, cur_size_in_bytes);
2842
		src_offset += cur_size_in_bytes;
2843
		dst_offset += cur_size_in_bytes;
3192 Serge 2844
	}
5078 serge 2845
	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2846
	radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
2847
	radeon_ring_write(ring, WAIT_CP_DMA_IDLE_bit);
3192 Serge 2848
 
2849
	r = radeon_fence_emit(rdev, fence, ring->idx);
2850
	if (r) {
2851
		radeon_ring_unlock_undo(rdev, ring);
5078 serge 2852
		radeon_semaphore_free(rdev, &sem, NULL);
3192 Serge 2853
		return r;
2854
	}
2855
 
5078 serge 2856
	radeon_ring_unlock_commit(rdev, ring, false);
3192 Serge 2857
	radeon_semaphore_free(rdev, &sem, *fence);
2858
 
2859
	return r;
2860
}
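
/* Worked example (illustrative): copying 16 MiB gives size_in_bytes =
 * 16777216; with the 0x1fffff-byte limit per CP_DMA packet, num_loops =
 * DIV_ROUND_UP(16777216, 0x1fffff) = 9, and only the final packet sets
 * PACKET3_CP_DMA_CP_SYNC.
 */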
2861
 
1221 serge 2862
int r600_set_surface_reg(struct radeon_device *rdev, int reg,
2863
			 uint32_t tiling_flags, uint32_t pitch,
2864
			 uint32_t offset, uint32_t obj_size)
2865
{
2866
	/* FIXME: implement */
2867
	return 0;
2868
}
2869
 
2870
void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
2871
{
2872
	/* FIXME: implement */
2873
}
2874
 
2997 Serge 2875
static int r600_startup(struct radeon_device *rdev)
1221 serge 2876
{
3192 Serge 2877
	struct radeon_ring *ring;
1221 serge 2878
	int r;
2879
 
1963 serge 2880
	/* enable pcie gen2 link */
2881
	r600_pcie_gen2_enable(rdev);
2882
 
5078 serge 2883
	/* scratch needs to be initialized before MC */
3764 Serge 2884
	r = r600_vram_scratch_init(rdev);
2885
	if (r)
2886
		return r;
2887
 
1221 serge 2888
	r600_mc_program(rdev);
5078 serge 2889
 
1221 serge 2890
	if (rdev->flags & RADEON_IS_AGP) {
2891
		r600_agp_enable(rdev);
2892
	} else {
2893
		r = r600_pcie_gart_enable(rdev);
2894
		if (r)
2895
			return r;
2896
	}
2897
	r600_gpu_init(rdev);
2898
 
2005 serge 2899
	/* allocate wb buffer */
2900
	r = radeon_wb_init(rdev);
2901
	if (r)
2902
		return r;
2903
 
3192 Serge 2904
	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
2905
	if (r) {
2906
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
2907
		return r;
2908
	}
2909
 
2005 serge 2910
	/* Enable IRQ */
3764 Serge 2911
	if (!rdev->irq.installed) {
2912
		r = radeon_irq_kms_init(rdev);
2913
		if (r)
2914
			return r;
2915
	}
2916
 
2005 serge 2917
	r = r600_irq_init(rdev);
2918
	if (r) {
2919
		DRM_ERROR("radeon: IH init failed (%d).\n", r);
2920
//		radeon_irq_kms_fini(rdev);
2921
		return r;
2922
	}
2923
	r600_irq_set(rdev);
2924
 
3192 Serge 2925
	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
2997 Serge 2926
	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
5078 serge 2927
			     RADEON_CP_PACKET2);
3192 Serge 2928
	if (r)
2929
		return r;
2997 Serge 2930
 
1413 serge 2931
	r = r600_cp_load_microcode(rdev);
2932
	if (r)
2933
		return r;
2934
	r = r600_cp_resume(rdev);
2935
	if (r)
2936
		return r;
1963 serge 2937
 
3192 Serge 2938
	r = radeon_ib_pool_init(rdev);
2939
	if (r) {
2940
		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
2941
		return r;
2942
	}
5078 serge 2943
 
2944
 
1221 serge 2945
	return 0;
2946
}
2947
 
2948
void r600_vga_set_state(struct radeon_device *rdev, bool state)
2949
{
2950
	uint32_t temp;
2951
 
2952
	temp = RREG32(CONFIG_CNTL);
2953
	if (state == false) {
2954
		temp &= ~(1<<0);
2955
		temp |= (1<<1);
2956
	} else {
2957
		temp &= ~(1<<1);
2958
	}
2959
	WREG32(CONFIG_CNTL, temp);
2960
}
2961
 
2962
 
2963
 
2964
 
2965
 
2966
/* Plan is to move initialization in that function and use
2967
 * helper function so that radeon_device_init pretty much
2968
 * do nothing more than calling asic specific function. This
2969
 * should also allow to remove a bunch of callback function
2970
 * like vram_info.
2971
 */
int r600_init(struct radeon_device *rdev)
{
	int r;

	if (r600_debugfs_mc_info_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for mc !\n");
	}
	/* Read BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	/* Must be an ATOMBIOS */
	if (!rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
		return -EINVAL;
	}
	r = radeon_atombios_init(rdev);
	if (r)
		return r;
	/* Post card if necessary */
	if (!radeon_card_posted(rdev)) {
		if (!rdev->bios) {
			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
			return -EINVAL;
		}
		DRM_INFO("GPU not posted. posting now...\n");
		atom_asic_init(rdev->mode_info.atom_context);
	}
	/* Initialize scratch registers */
	r600_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r)
			radeon_agp_disable(rdev);
	}
	r = r600_mc_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;

	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
		r = r600_init_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load firmware!\n");
			return r;
		}
	}

	/* Initialize power management */
	radeon_pm_init(rdev);

	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
	r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);

	rdev->ih.ring_obj = NULL;
	r600_ih_ring_init(rdev, 64 * 1024);

	r = r600_pcie_gart_init(rdev);
	if (r)
		return r;

	rdev->accel_working = true;
	r = r600_startup(rdev);
	if (r) {
		dev_err(rdev->dev, "disabling GPU acceleration\n");
		r600_pcie_gart_fini(rdev);
		rdev->accel_working = false;
	}

	return 0;
}

/*
 * CS stuff
 */
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];
	u32 next_rptr;

	if (ring->rptr_save_reg) {
		next_rptr = ring->wptr + 3 + 4;
		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		radeon_ring_write(ring, ((ring->rptr_save_reg -
					 PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
		radeon_ring_write(ring, next_rptr);
	} else if (rdev->wb.enabled) {
		next_rptr = ring->wptr + 5 + 4;
		radeon_ring_write(ring, PACKET3(PACKET3_MEM_WRITE, 3));
		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
		radeon_ring_write(ring, (upper_32_bits(ring->next_rptr_gpu_addr) & 0xff) | (1 << 18));
		radeon_ring_write(ring, next_rptr);
		radeon_ring_write(ring, 0);
	}

	radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
			  (2 << 0) |
#endif
			  (ib->gpu_addr & 0xFFFFFFFC));
	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
	radeon_ring_write(ring, ib->length_dw);
}
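
/*
 * Editor's note on the next_rptr arithmetic above: the stored value is
 * the ring position after this whole sequence retires.  The
 * SET_CONFIG_REG write is 3 dwords and the trailing INDIRECT_BUFFER
 * packet is 4, hence wptr + 3 + 4; the MEM_WRITE variant is 5 dwords
 * (header, two address dwords, two data dwords), hence wptr + 5 + 4.
 */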

int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
	struct radeon_ib ib;
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = radeon_scratch_get(rdev, &scratch);
	if (r) {
		DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
	if (r) {
		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
		goto free_scratch;
	}
	ib.ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
	ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
	ib.ptr[2] = 0xDEADBEEF;
	ib.length_dw = 3;
	r = radeon_ib_schedule(rdev, &ib, NULL, false);
	if (r) {
		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
		goto free_ib;
	}
	r = radeon_fence_wait(ib.fence, false);
	if (r) {
		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
		goto free_ib;
	}
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
	} else {
		DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}
free_ib:
	radeon_ib_free(rdev, &ib);
free_scratch:
	radeon_scratch_free(rdev, scratch);
	return r;
}
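
/*
 * Editor's note (assumption, inferred from the packet usage above):
 * SET_CONFIG_REG takes a dword index relative to the config-register
 * aperture, which is why both this test and r600_ring_ib_execute()
 * encode (reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2 rather than the
 * raw register address.
 */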

/*
 * Interrupts
 *
 * Interrupts use a ring buffer on r6xx/r7xx hardware.  It works pretty
 * much the same as the CP ring buffer, but in reverse.  Rather than the
 * CPU writing to the ring and the GPU consuming, the GPU writes to the
 * ring and the host consumes.  As the host irq handler processes
 * interrupts, it increments the rptr.  When the rptr catches up with
 * the wptr, all the current interrupts have been processed.
 */
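
/*
 * A minimal consumer sketch (editor's illustration, not compiled into
 * the driver): how a host-side handler drains the ring described
 * above.  It assumes 128-bit (16-byte) entries and the byte-granular
 * ptr_mask set up by r600_ih_ring_init() below; r600_irq_process() is
 * the real implementation.
 */
#if 0
static void ih_drain_sketch(struct radeon_device *rdev, u32 wptr)
{
	u32 rptr = rdev->ih.rptr;

	while (rptr != wptr) {
		/* ... decode the 4 dwords at &rdev->ih.ring[rptr / 4] ... */
		rptr = (rptr + 16) & rdev->ih.ptr_mask;	/* 16 bytes per entry */
	}
	rdev->ih.rptr = rptr;
	WREG32(IH_RB_RPTR, rptr);	/* tell the GPU we have caught up */
}
#endif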

void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
{
	u32 rb_bufsz;

	/* Align ring size */
	rb_bufsz = order_base_2(ring_size / 4);
	ring_size = (1 << rb_bufsz) * 4;
	rdev->ih.ring_size = ring_size;
	rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
	rdev->ih.rptr = 0;
}
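
/*
 * Worked example (editor's note): the 64 * 1024 byte request made from
 * r600_init() gives order_base_2(65536 / 4) = 14, so ring_size stays
 * (1 << 14) * 4 = 65536 and ptr_mask becomes 0xffff, i.e. byte offsets
 * into the ring wrap at 64 KiB.
 */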

int r600_ih_ring_alloc(struct radeon_device *rdev)
{
	int r;

	/* Allocate ring buffer */
	if (rdev->ih.ring_obj == NULL) {
		r = radeon_bo_create(rdev, rdev->ih.ring_size,
				     PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT, 0,
				     NULL, &rdev->ih.ring_obj);
		if (r) {
			DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
			return r;
		}
		r = radeon_bo_reserve(rdev->ih.ring_obj, false);
		if (unlikely(r != 0))
			return r;
		r = radeon_bo_pin(rdev->ih.ring_obj,
				  RADEON_GEM_DOMAIN_GTT,
				  &rdev->ih.gpu_addr);
		if (r) {
			radeon_bo_unreserve(rdev->ih.ring_obj);
			DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r);
			return r;
		}
		r = radeon_bo_kmap(rdev->ih.ring_obj,
				   (void **)&rdev->ih.ring);
		radeon_bo_unreserve(rdev->ih.ring_obj);
		if (r) {
			DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r);
			return r;
		}
	}
	return 0;
}
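
/*
 * Editor's note: the ring is pinned in GTT (cacheable system memory)
 * rather than VRAM, which is why r600_irq_init() below leaves
 * IH_REQ_NONSNOOP_EN clear when programming INTERRUPT_CNTL.
 */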

void r600_ih_ring_fini(struct radeon_device *rdev)
{
	int r;

	if (rdev->ih.ring_obj) {
		r = radeon_bo_reserve(rdev->ih.ring_obj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(rdev->ih.ring_obj);
			radeon_bo_unpin(rdev->ih.ring_obj);
			radeon_bo_unreserve(rdev->ih.ring_obj);
		}
		radeon_bo_unref(&rdev->ih.ring_obj);
		rdev->ih.ring = NULL;
		rdev->ih.ring_obj = NULL;
	}
}

void r600_rlc_stop(struct radeon_device *rdev)
{
	if ((rdev->family >= CHIP_RV770) &&
	    (rdev->family <= CHIP_RV740)) {
		/* r7xx asics need to soft reset RLC before halting */
		WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
		RREG32(SRBM_SOFT_RESET);
		mdelay(15);
		WREG32(SRBM_SOFT_RESET, 0);
		RREG32(SRBM_SOFT_RESET);
	}

	WREG32(RLC_CNTL, 0);
}

static void r600_rlc_start(struct radeon_device *rdev)
{
	WREG32(RLC_CNTL, RLC_ENABLE);
}

static int r600_rlc_resume(struct radeon_device *rdev)
{
	u32 i;
	const __be32 *fw_data;

	if (!rdev->rlc_fw)
		return -EINVAL;

	r600_rlc_stop(rdev);

	WREG32(RLC_HB_CNTL, 0);

	WREG32(RLC_HB_BASE, 0);
	WREG32(RLC_HB_RPTR, 0);
	WREG32(RLC_HB_WPTR, 0);
	WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
	WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
	WREG32(RLC_MC_CNTL, 0);
	WREG32(RLC_UCODE_CNTL, 0);

	fw_data = (const __be32 *)rdev->rlc_fw->data;
	if (rdev->family >= CHIP_RV770) {
		for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
			WREG32(RLC_UCODE_ADDR, i);
			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
		}
	} else {
		for (i = 0; i < R600_RLC_UCODE_SIZE; i++) {
			WREG32(RLC_UCODE_ADDR, i);
			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
		}
	}
	WREG32(RLC_UCODE_ADDR, 0);

	r600_rlc_start(rdev);

	return 0;
}

static void r600_enable_interrupts(struct radeon_device *rdev)
{
	u32 ih_cntl = RREG32(IH_CNTL);
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);

	ih_cntl |= ENABLE_INTR;
	ih_rb_cntl |= IH_RB_ENABLE;
	WREG32(IH_CNTL, ih_cntl);
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	rdev->ih.enabled = true;
}

void r600_disable_interrupts(struct radeon_device *rdev)
{
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
	u32 ih_cntl = RREG32(IH_CNTL);

	ih_rb_cntl &= ~IH_RB_ENABLE;
	ih_cntl &= ~ENABLE_INTR;
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	WREG32(IH_CNTL, ih_cntl);
	/* set rptr, wptr to 0 */
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);
	rdev->ih.enabled = false;
	rdev->ih.rptr = 0;
}

static void r600_disable_interrupt_state(struct radeon_device *rdev)
{
	u32 tmp;

	WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
	tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
	WREG32(DMA_CNTL, tmp);
	WREG32(GRBM_INT_CNTL, 0);
	WREG32(DxMODE_INT_MASK, 0);
	WREG32(D1GRPH_INTERRUPT_CONTROL, 0);
	WREG32(D2GRPH_INTERRUPT_CONTROL, 0);
	if (ASIC_IS_DCE3(rdev)) {
		WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0);
		WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0);
		tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD1_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD2_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD3_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
		if (ASIC_IS_DCE32(rdev)) {
			tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
			tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
			tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, tmp);
			tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, tmp);
		} else {
			tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
			WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
			tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
			WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp);
		}
	} else {
		WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
		WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
		tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
		tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
		tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
		tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
		WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
		tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
		WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp);
	}
}

int r600_irq_init(struct radeon_device *rdev)
{
	int ret = 0;
	int rb_bufsz;
	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;

	/* allocate ring */
	ret = r600_ih_ring_alloc(rdev);
	if (ret)
		return ret;

	/* disable irqs */
	r600_disable_interrupts(rdev);

	/* init rlc */
	if (rdev->family >= CHIP_CEDAR)
		ret = evergreen_rlc_resume(rdev);
	else
		ret = r600_rlc_resume(rdev);
	if (ret) {
		r600_ih_ring_fini(rdev);
		return ret;
	}

	/* setup interrupt control */
	/* set dummy read address to ring address */
	WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
	interrupt_cntl = RREG32(INTERRUPT_CNTL);
	/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
	 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
	 */
	interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
	/* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
	interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
	WREG32(INTERRUPT_CNTL, interrupt_cntl);

	WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
	rb_bufsz = order_base_2(rdev->ih.ring_size / 4);

	ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
		      IH_WPTR_OVERFLOW_CLEAR |
		      (rb_bufsz << 1));

	if (rdev->wb.enabled)
		ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;

	/* set the writeback address whether it's enabled or not */
	WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);

	WREG32(IH_RB_CNTL, ih_rb_cntl);

	/* set rptr, wptr to 0 */
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);

	/* Default settings for IH_CNTL (disabled at first) */
	ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10);
	/* RPTR_REARM only works if msi's are enabled */
	if (rdev->msi_enabled)
		ih_cntl |= RPTR_REARM;
	WREG32(IH_CNTL, ih_cntl);

	/* force the active interrupt state to all disabled */
	if (rdev->family >= CHIP_CEDAR)
		evergreen_disable_interrupt_state(rdev);
	else
		r600_disable_interrupt_state(rdev);

	/* at this point everything should be setup correctly to enable master */
	pci_set_master(rdev->pdev);

	/* enable irqs */
	r600_enable_interrupts(rdev);

	return ret;
}
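
/*
 * Editor's note: with write-back enabled the hardware mirrors
 * IH_RB_WPTR into the writeback page at R600_WB_IH_WPTR_OFFSET, so
 * r600_get_ih_wptr() can read the wptr from memory instead of doing an
 * MMIO register read on every interrupt.
 */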

int r600_irq_set(struct radeon_device *rdev)
{
	u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
	u32 mode_int = 0;
	u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
	u32 grbm_int_cntl = 0;
	u32 hdmi0, hdmi1;
	u32 dma_cntl;
	u32 thermal_int = 0;

	if (!rdev->irq.installed) {
		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
		return -EINVAL;
	}
	/* don't enable anything if the ih is disabled */
	if (!rdev->ih.enabled) {
		r600_disable_interrupts(rdev);
		/* force the active interrupt state to all disabled */
		r600_disable_interrupt_state(rdev);
		return 0;
	}

	if (ASIC_IS_DCE3(rdev)) {
		hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
		if (ASIC_IS_DCE32(rdev)) {
			hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
			hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
			hdmi0 = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
			hdmi1 = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
		} else {
			hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
			hdmi1 = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
		}
	} else {
		hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
		hdmi1 = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
	}

	dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE;

	if ((rdev->family > CHIP_R600) && (rdev->family < CHIP_RV770)) {
		thermal_int = RREG32(CG_THERMAL_INT) &
			~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
	} else if (rdev->family >= CHIP_RV770) {
		thermal_int = RREG32(RV770_CG_THERMAL_INT) &
			~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
	}
	if (rdev->irq.dpm_thermal) {
		DRM_DEBUG("dpm thermal\n");
		thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
	}

	if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
		DRM_DEBUG("r600_irq_set: sw int\n");
		cp_int_cntl |= RB_INT_ENABLE;
		cp_int_cntl |= TIME_STAMP_INT_ENABLE;
	}

	if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
		DRM_DEBUG("r600_irq_set: sw int dma\n");
		dma_cntl |= TRAP_ENABLE;
	}

	if (rdev->irq.crtc_vblank_int[0] ||
	    atomic_read(&rdev->irq.pflip[0])) {
		DRM_DEBUG("r600_irq_set: vblank 0\n");
		mode_int |= D1MODE_VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[1] ||
	    atomic_read(&rdev->irq.pflip[1])) {
		DRM_DEBUG("r600_irq_set: vblank 1\n");
		mode_int |= D2MODE_VBLANK_INT_MASK;
	}
	if (rdev->irq.hpd[0]) {
		DRM_DEBUG("r600_irq_set: hpd 1\n");
		hpd1 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[1]) {
		DRM_DEBUG("r600_irq_set: hpd 2\n");
		hpd2 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[2]) {
		DRM_DEBUG("r600_irq_set: hpd 3\n");
		hpd3 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[3]) {
		DRM_DEBUG("r600_irq_set: hpd 4\n");
		hpd4 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[4]) {
		DRM_DEBUG("r600_irq_set: hpd 5\n");
		hpd5 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[5]) {
		DRM_DEBUG("r600_irq_set: hpd 6\n");
		hpd6 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.afmt[0]) {
		DRM_DEBUG("r600_irq_set: hdmi 0\n");
		hdmi0 |= HDMI0_AZ_FORMAT_WTRIG_MASK;
	}
	if (rdev->irq.afmt[1]) {
		DRM_DEBUG("r600_irq_set: hdmi 1\n");
		hdmi1 |= HDMI0_AZ_FORMAT_WTRIG_MASK;
	}

	WREG32(CP_INT_CNTL, cp_int_cntl);
	WREG32(DMA_CNTL, dma_cntl);
	WREG32(DxMODE_INT_MASK, mode_int);
	WREG32(D1GRPH_INTERRUPT_CONTROL, DxGRPH_PFLIP_INT_MASK);
	WREG32(D2GRPH_INTERRUPT_CONTROL, DxGRPH_PFLIP_INT_MASK);
	WREG32(GRBM_INT_CNTL, grbm_int_cntl);
	if (ASIC_IS_DCE3(rdev)) {
		WREG32(DC_HPD1_INT_CONTROL, hpd1);
		WREG32(DC_HPD2_INT_CONTROL, hpd2);
		WREG32(DC_HPD3_INT_CONTROL, hpd3);
		WREG32(DC_HPD4_INT_CONTROL, hpd4);
		if (ASIC_IS_DCE32(rdev)) {
			WREG32(DC_HPD5_INT_CONTROL, hpd5);
			WREG32(DC_HPD6_INT_CONTROL, hpd6);
			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, hdmi0);
			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, hdmi1);
		} else {
			WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
			WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, hdmi1);
		}
	} else {
		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
		WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
		WREG32(HDMI1_AUDIO_PACKET_CONTROL, hdmi1);
	}
	if ((rdev->family > CHIP_R600) && (rdev->family < CHIP_RV770)) {
		WREG32(CG_THERMAL_INT, thermal_int);
	} else if (rdev->family >= CHIP_RV770) {
		WREG32(RV770_CG_THERMAL_INT, thermal_int);
	}

	return 0;
}

static void r600_irq_ack(struct radeon_device *rdev)
{
	u32 tmp;

	if (ASIC_IS_DCE3(rdev)) {
		rdev->irq.stat_regs.r600.disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
		rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
		rdev->irq.stat_regs.r600.disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
		if (ASIC_IS_DCE32(rdev)) {
			rdev->irq.stat_regs.r600.hdmi0_status = RREG32(AFMT_STATUS + DCE3_HDMI_OFFSET0);
			rdev->irq.stat_regs.r600.hdmi1_status = RREG32(AFMT_STATUS + DCE3_HDMI_OFFSET1);
		} else {
			rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS);
			rdev->irq.stat_regs.r600.hdmi1_status = RREG32(DCE3_HDMI1_STATUS);
		}
	} else {
		rdev->irq.stat_regs.r600.disp_int = RREG32(DISP_INTERRUPT_STATUS);
		rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
		rdev->irq.stat_regs.r600.disp_int_cont2 = 0;
		rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS);
		rdev->irq.stat_regs.r600.hdmi1_status = RREG32(HDMI1_STATUS);
	}
	rdev->irq.stat_regs.r600.d1grph_int = RREG32(D1GRPH_INTERRUPT_STATUS);
	rdev->irq.stat_regs.r600.d2grph_int = RREG32(D2GRPH_INTERRUPT_STATUS);

	if (rdev->irq.stat_regs.r600.d1grph_int & DxGRPH_PFLIP_INT_OCCURRED)
		WREG32(D1GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
	if (rdev->irq.stat_regs.r600.d2grph_int & DxGRPH_PFLIP_INT_OCCURRED)
		WREG32(D2GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
	if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT)
		WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
	if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT)
		WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
	if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT)
		WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
	if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT)
		WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
	if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD1_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD1_INT_CONTROL, tmp);
		} else {
			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
		}
	}
	if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD2_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD2_INT_CONTROL, tmp);
		} else {
			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
		}
	}
	if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD3_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD3_INT_CONTROL, tmp);
		} else {
			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
		}
	}
	if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
		tmp = RREG32(DC_HPD4_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
	}
	if (ASIC_IS_DCE32(rdev)) {
		if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
			tmp = RREG32(DC_HPD5_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
		}
		if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
			tmp = RREG32(DC_HPD6_INT_CONTROL);	/* was DC_HPD5: read the register we ack */
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
		}
		if (rdev->irq.stat_regs.r600.hdmi0_status & AFMT_AZ_FORMAT_WTRIG) {
			tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0);
			tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, tmp);
		}
		if (rdev->irq.stat_regs.r600.hdmi1_status & AFMT_AZ_FORMAT_WTRIG) {
			tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1);
			tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, tmp);
		}
	} else {
		if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
			tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL);
			tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
			WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
		}
		if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
			if (ASIC_IS_DCE3(rdev)) {
				tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL);
				tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
				WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp);
			} else {
				tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL);
				tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
				WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp);
			}
		}
	}
}

void r600_irq_disable(struct radeon_device *rdev)
{
	r600_disable_interrupts(rdev);
	/* Wait and acknowledge irq */
	mdelay(1);
	r600_irq_ack(rdev);
	r600_disable_interrupt_state(rdev);
}

static u32 r600_get_ih_wptr(struct radeon_device *rdev)
{
	u32 wptr, tmp;

	if (rdev->wb.enabled)
		wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
	else
		wptr = RREG32(IH_RB_WPTR);

	if (wptr & RB_OVERFLOW) {
		/* When a ring buffer overflow happens, start parsing
		 * interrupts from the last vector not yet overwritten
		 * (wptr + 16). Hopefully this allows us to catch up.
		 */
		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
			wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
		rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
		tmp = RREG32(IH_RB_CNTL);
		tmp |= IH_WPTR_OVERFLOW_CLEAR;
		WREG32(IH_RB_CNTL, tmp);
		wptr &= ~RB_OVERFLOW;
	}
	return (wptr & rdev->ih.ptr_mask);
}
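
/*
 * Editor's note: on overflow the ring has wrapped over unread entries,
 * so restarting the rptr at (wptr + 16) deliberately skips the
 * clobbered region instead of replaying vectors that may already have
 * been overwritten by newer ones.
 */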

/*        r600 IV Ring
 * Each IV ring entry is 128 bits:
 * [7:0]    - interrupt source id
 * [31:8]   - reserved
 * [59:32]  - interrupt source data
 * [127:60] - reserved
 *
 * The basic interrupt vector entries
 * are decoded as follows:
 * src_id  src_data  description
 *      1         0  D1 Vblank
 *      1         1  D1 Vline
 *      5         0  D2 Vblank
 *      5         1  D2 Vline
 *     19         0  FP Hot plug detection A
 *     19         1  FP Hot plug detection B
 *     19         2  DAC A auto-detection
 *     19         3  DAC B auto-detection
 *     21         4  HDMI block A
 *     21         5  HDMI block B
 *    176         -  CP_INT RB
 *    177         -  CP_INT IB1
 *    178         -  CP_INT IB2
 *    181         -  EOP Interrupt
 *    233         -  GUI Idle
 *
 * Note, these are based on r600 and may need to be
 * adjusted or added to on newer asics
 */
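
/*
 * Decode sketch (editor's illustration, not compiled): how one IV
 * entry maps onto the dword reads in r600_irq_process() below, using
 * the bit layout documented above.
 */
#if 0
static void iv_decode_sketch(const u32 *ring, u32 rptr)
{
	u32 ring_index = rptr / 4;	/* rptr counts bytes */
	u32 src_id   = le32_to_cpu(ring[ring_index]) & 0xff;		/* [7:0]   */
	u32 src_data = le32_to_cpu(ring[ring_index + 1]) & 0xfffffff;	/* [59:32] */

	/* e.g. src_id == 1 with src_data == 0 is a D1 vblank,
	 * src_id == 181 is a CP EOP event (src_data unused). */
}
#endif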

#undef  DRM_DEBUG
#define DRM_DEBUG(...)

int r600_irq_process(struct radeon_device *rdev)
{
	u32 wptr;
	u32 rptr;
	u32 src_id, src_data;
	u32 ring_index;
	bool queue_hotplug = false;
	bool queue_hdmi = false;
	bool queue_thermal = false;

	if (!rdev->ih.enabled || rdev->shutdown)
		return IRQ_NONE;

	/* No MSIs, need a dummy read to flush PCI DMAs */
	if (!rdev->msi_enabled)
		RREG32(IH_RB_WPTR);

	wptr = r600_get_ih_wptr(rdev);

restart_ih:
	/* is somebody else already processing irqs? */
	if (atomic_xchg(&rdev->ih.lock, 1))
		return IRQ_NONE;

	rptr = rdev->ih.rptr;
	DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);

	/* Order reading of wptr vs. reading of IH ring data */
	rmb();

	/* display interrupts */
	r600_irq_ack(rdev);

	while (rptr != wptr) {
		/* wptr/rptr are in bytes! */
		ring_index = rptr / 4;
		src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
		src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;

		switch (src_id) {
		case 1: /* D1 vblank/vline */
			switch (src_data) {
			case 0: /* D1 vblank */
				if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[0]) {
//						drm_handle_vblank(rdev->ddev, 0);
						rdev->pm.vblank_sync = true;
//						wake_up(&rdev->irq.vblank_queue);
					}
//					if (rdev->irq.pflip[0])
//						radeon_crtc_handle_flip(rdev, 0);
					rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D1 vblank\n");
				}
				break;
			case 1: /* D1 vline */
				if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D1 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 5: /* D2 vblank/vline */
			switch (src_data) {
			case 0: /* D2 vblank */
				if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[1]) {
//						drm_handle_vblank(rdev->ddev, 1);
						rdev->pm.vblank_sync = true;
//						wake_up(&rdev->irq.vblank_queue);
					}
//					if (rdev->irq.pflip[1])
//						radeon_crtc_handle_flip(rdev, 1);
					rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D2 vblank\n");
				}
				break;
			case 1: /* D2 vline */
				if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D2 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 19: /* HPD/DAC hotplug */
			switch (src_data) {
			case 0:
				if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD1\n");
				}
				break;
			case 1:
				if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD2\n");
				}
				break;
			case 4:
				if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD3\n");
				}
				break;
			case 5:
				if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD4\n");
				}
				break;
			case 10:
				if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD5\n");
				}
				break;
			case 12:
				if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD6\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 21: /* hdmi */
			switch (src_data) {
			case 4:
				if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
					rdev->irq.stat_regs.r600.hdmi0_status &= ~HDMI0_AZ_FORMAT_WTRIG;
					queue_hdmi = true;
					DRM_DEBUG("IH: HDMI0\n");
				}
				break;
			case 5:
				if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
					rdev->irq.stat_regs.r600.hdmi1_status &= ~HDMI0_AZ_FORMAT_WTRIG;
					queue_hdmi = true;
					DRM_DEBUG("IH: HDMI1\n");
				}
				break;
			default:
				DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 124: /* UVD */
			DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
			radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
			break;
		case 176: /* CP_INT in ring buffer */
		case 177: /* CP_INT in IB1 */
		case 178: /* CP_INT in IB2 */
			DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
			break;
		case 181: /* CP EOP event */
			DRM_DEBUG("IH: CP EOP\n");
			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
			break;
		case 224: /* DMA trap event */
			DRM_DEBUG("IH: DMA trap\n");
			radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
			break;
		case 230: /* thermal low to high */
			DRM_DEBUG("IH: thermal low to high\n");
			rdev->pm.dpm.thermal.high_to_low = false;
			queue_thermal = true;
			break;
		case 231: /* thermal high to low */
			DRM_DEBUG("IH: thermal high to low\n");
			rdev->pm.dpm.thermal.high_to_low = true;
			queue_thermal = true;
			break;
		case 233: /* GUI IDLE */
			DRM_DEBUG("IH: GUI idle\n");
			break;
		default:
			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
			break;
		}

		/* wptr/rptr are in bytes! */
		rptr += 16;
		rptr &= rdev->ih.ptr_mask;
	}
	rdev->ih.rptr = rptr;
	WREG32(IH_RB_RPTR, rdev->ih.rptr);
	atomic_set(&rdev->ih.lock, 0);

	/* make sure wptr hasn't changed while processing */
	wptr = r600_get_ih_wptr(rdev);
	if (wptr != rptr)
		goto restart_ih;

	return IRQ_HANDLED;
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int r600_debugfs_mc_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	DREG32_SYS(m, rdev, R_000E50_SRBM_STATUS);
	DREG32_SYS(m, rdev, VM_L2_STATUS);
	return 0;
}

static struct drm_info_list r600_mc_info_list[] = {
	{"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
};
#endif

int r600_debugfs_mc_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, r600_mc_info_list, ARRAY_SIZE(r600_mc_info_list));
#else
	return 0;
#endif
}

/**
 * r600_mmio_hdp_flush - flush Host Data Path cache via MMIO
 * @rdev: radeon device structure
 *
 * Some R6XX/R7XX don't seem to take into account HDP flushes performed
 * through the ring buffer. This leads to corruption in rendering, see
 * http://bugzilla.kernel.org/show_bug.cgi?id=15186 . To avoid this, we
 * directly perform the HDP flush by writing the register through MMIO.
 */
void r600_mmio_hdp_flush(struct radeon_device *rdev)
{
	/* r7xx hw bug.  write to HDP_DEBUG1 followed by fb read
	 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL.
	 * This seems to cause problems on some AGP cards. Just use the old
	 * method for them.
	 */
	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
	    rdev->vram_scratch.ptr && !(rdev->flags & RADEON_IS_AGP)) {
		void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
		u32 tmp;

		WREG32(HDP_DEBUG1, 0);
		tmp = readl((void __iomem *)ptr);
	} else
		WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
}
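
/*
 * Usage sketch (editor's illustration, an assumption rather than a
 * driver-mandated sequence): a caller typically flushes after GPU
 * writes that the CPU is about to read back, e.g.
 *	radeon_fence_wait(fence, false);
 *	r600_mmio_hdp_flush(rdev);
 * followed by the CPU read of the written memory.
 */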

void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes)
{
	u32 link_width_cntl, mask;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return;

	radeon_gui_idle(rdev);

	switch (lanes) {
	case 0:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
		break;
	case 1:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
		break;
	case 2:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
		break;
	case 4:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
		break;
	case 8:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
		break;
	case 12:
		/* not actually supported */
		mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
		break;
	case 16:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
		break;
	default:
		DRM_ERROR("invalid pcie lane request: %d\n", lanes);
		return;
	}

	link_width_cntl = RREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
	link_width_cntl &= ~RADEON_PCIE_LC_LINK_WIDTH_MASK;
	link_width_cntl |= mask << RADEON_PCIE_LC_LINK_WIDTH_SHIFT;
	link_width_cntl |= (RADEON_PCIE_LC_RECONFIG_NOW |
			    R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE);

	WREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
}

int r600_get_pcie_lanes(struct radeon_device *rdev)
{
	u32 link_width_cntl;

	if (rdev->flags & RADEON_IS_IGP)
		return 0;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return 0;

	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return 0;

	radeon_gui_idle(rdev);

	link_width_cntl = RREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL);

	switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
	case RADEON_PCIE_LC_LINK_WIDTH_X1:
		return 1;
	case RADEON_PCIE_LC_LINK_WIDTH_X2:
		return 2;
	case RADEON_PCIE_LC_LINK_WIDTH_X4:
		return 4;
	case RADEON_PCIE_LC_LINK_WIDTH_X8:
		return 8;
	case RADEON_PCIE_LC_LINK_WIDTH_X12:
		/* not actually supported */
		return 12;
	case RADEON_PCIE_LC_LINK_WIDTH_X0:
	case RADEON_PCIE_LC_LINK_WIDTH_X16:
	default:
		return 16;
	}
}

static void r600_pcie_gen2_enable(struct radeon_device *rdev)
{
	u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp;
	u16 link_cntl2;

	if (radeon_pcie_gen2 == 0)
		return;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return;

	/* only RV6xx+ chips are supported */
	if (rdev->family <= CHIP_R600)
		return;

	if ((rdev->pdev->bus->max_bus_speed != PCIE_SPEED_5_0GT) &&
	    (rdev->pdev->bus->max_bus_speed != PCIE_SPEED_8_0GT))
		return;

	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
	if (speed_cntl & LC_CURRENT_DATA_RATE) {
		DRM_INFO("PCIE gen 2 link speeds already enabled\n");
		return;
	}

	DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");

	/* 55 nm r6xx asics */
	if ((rdev->family == CHIP_RV670) ||
	    (rdev->family == CHIP_RV620) ||
	    (rdev->family == CHIP_RV635)) {
		/* advertise upconfig capability */
		link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
		link_width_cntl &= ~LC_UPCONFIGURE_DIS;
		WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
		link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
		if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) {
			lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
			link_width_cntl &= ~(LC_LINK_WIDTH_MASK |
					     LC_RECONFIG_ARC_MISSING_ESCAPE);
			link_width_cntl |= lanes | LC_RECONFIG_NOW | LC_RENEGOTIATE_EN;
			WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
		} else {
			link_width_cntl |= LC_UPCONFIGURE_DIS;
			WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
		}
	}

	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
	if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
	    (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {

		/* 55 nm r6xx asics */
		if ((rdev->family == CHIP_RV670) ||
		    (rdev->family == CHIP_RV620) ||
		    (rdev->family == CHIP_RV635)) {
			WREG32(MM_CFGREGS_CNTL, 0x8);
			link_cntl2 = RREG32(0x4088);
			WREG32(MM_CFGREGS_CNTL, 0);
			/* not supported yet */
			if (link_cntl2 & SELECTABLE_DEEMPHASIS)
				return;
		}

		speed_cntl &= ~LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK;
		speed_cntl |= (0x3 << LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT);
		speed_cntl &= ~LC_VOLTAGE_TIMER_SEL_MASK;
		speed_cntl &= ~LC_FORCE_DIS_HW_SPEED_CHANGE;
		speed_cntl |= LC_FORCE_EN_HW_SPEED_CHANGE;
		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);

		tmp = RREG32(0x541c);
		WREG32(0x541c, tmp | 0x8);
		WREG32(MM_CFGREGS_CNTL, MM_WR_TO_CFG_EN);
		link_cntl2 = RREG16(0x4088);
		link_cntl2 &= ~TARGET_LINK_SPEED_MASK;
		link_cntl2 |= 0x2;
		WREG16(0x4088, link_cntl2);
		WREG32(MM_CFGREGS_CNTL, 0);

		if ((rdev->family == CHIP_RV670) ||
		    (rdev->family == CHIP_RV620) ||
		    (rdev->family == CHIP_RV635)) {
			training_cntl = RREG32_PCIE_PORT(PCIE_LC_TRAINING_CNTL);
			training_cntl &= ~LC_POINT_7_PLUS_EN;
			WREG32_PCIE_PORT(PCIE_LC_TRAINING_CNTL, training_cntl);
		} else {
			speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
			speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
			WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
		}

		speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
		speed_cntl |= LC_GEN2_EN_STRAP;
		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);

	} else {
		link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
		/* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
		if (1)
			link_width_cntl |= LC_UPCONFIGURE_DIS;
		else
			link_width_cntl &= ~LC_UPCONFIGURE_DIS;
		WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
	}
}

/**
 * r600_get_gpu_clock_counter - return GPU clock counter snapshot
 *
 * @rdev: radeon_device pointer
 *
 * Fetches a GPU clock counter snapshot (R6xx-cayman).
 * Returns the 64 bit clock counter snapshot.
 */
uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev)
{
	uint64_t clock;

	mutex_lock(&rdev->gpu_clock_mutex);
	WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
	clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
		((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
	mutex_unlock(&rdev->gpu_clock_mutex);
	return clock;
}
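
/*
 * Interval sketch (editor's illustration, not compiled): measuring a
 * span of work in GPU clocks with two snapshots.  Converting the delta
 * to time would additionally require the GPU core clock, which this
 * sketch assumes the caller knows.
 */
#if 0
static uint64_t gpu_clock_delta_sketch(struct radeon_device *rdev)
{
	uint64_t start = r600_get_gpu_clock_counter(rdev);

	/* ... work being measured ... */

	return r600_get_gpu_clock_counter(rdev) - start;
}
#endif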