/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_audio.h"
#include "radeon_mode.h"
#include "r600d.h"
#include "atom.h"
#include "avivod.h"
#include "radeon_ucode.h"

/* Firmware Names */
MODULE_FIRMWARE("radeon/R600_pfp.bin");
MODULE_FIRMWARE("radeon/R600_me.bin");
MODULE_FIRMWARE("radeon/RV610_pfp.bin");
MODULE_FIRMWARE("radeon/RV610_me.bin");
MODULE_FIRMWARE("radeon/RV630_pfp.bin");
MODULE_FIRMWARE("radeon/RV630_me.bin");
MODULE_FIRMWARE("radeon/RV620_pfp.bin");
MODULE_FIRMWARE("radeon/RV620_me.bin");
MODULE_FIRMWARE("radeon/RV635_pfp.bin");
MODULE_FIRMWARE("radeon/RV635_me.bin");
MODULE_FIRMWARE("radeon/RV670_pfp.bin");
MODULE_FIRMWARE("radeon/RV670_me.bin");
MODULE_FIRMWARE("radeon/RS780_pfp.bin");
MODULE_FIRMWARE("radeon/RS780_me.bin");
MODULE_FIRMWARE("radeon/RV770_pfp.bin");
MODULE_FIRMWARE("radeon/RV770_me.bin");
MODULE_FIRMWARE("radeon/RV770_smc.bin");
MODULE_FIRMWARE("radeon/RV730_pfp.bin");
MODULE_FIRMWARE("radeon/RV730_me.bin");
MODULE_FIRMWARE("radeon/RV730_smc.bin");
MODULE_FIRMWARE("radeon/RV740_smc.bin");
MODULE_FIRMWARE("radeon/RV710_pfp.bin");
MODULE_FIRMWARE("radeon/RV710_me.bin");
MODULE_FIRMWARE("radeon/RV710_smc.bin");
MODULE_FIRMWARE("radeon/R600_rlc.bin");
MODULE_FIRMWARE("radeon/R700_rlc.bin");
MODULE_FIRMWARE("radeon/CEDAR_pfp.bin");
MODULE_FIRMWARE("radeon/CEDAR_me.bin");
MODULE_FIRMWARE("radeon/CEDAR_rlc.bin");
MODULE_FIRMWARE("radeon/CEDAR_smc.bin");
MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin");
MODULE_FIRMWARE("radeon/REDWOOD_me.bin");
MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin");
MODULE_FIRMWARE("radeon/REDWOOD_smc.bin");
MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin");
MODULE_FIRMWARE("radeon/JUNIPER_me.bin");
MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
MODULE_FIRMWARE("radeon/JUNIPER_smc.bin");
MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
MODULE_FIRMWARE("radeon/CYPRESS_smc.bin");
MODULE_FIRMWARE("radeon/PALM_pfp.bin");
MODULE_FIRMWARE("radeon/PALM_me.bin");
MODULE_FIRMWARE("radeon/SUMO_rlc.bin");
MODULE_FIRMWARE("radeon/SUMO_pfp.bin");
MODULE_FIRMWARE("radeon/SUMO_me.bin");
MODULE_FIRMWARE("radeon/SUMO2_pfp.bin");
MODULE_FIRMWARE("radeon/SUMO2_me.bin");

static const u32 crtc_offsets[2] =
{
	0,
	AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
};

int r600_debugfs_mc_info_init(struct radeon_device *rdev);

/* r600,rv610,rv630,rv620,rv635,rv670 */
int r600_mc_wait_for_idle(struct radeon_device *rdev);
static void r600_gpu_init(struct radeon_device *rdev);
void r600_fini(struct radeon_device *rdev);
void r600_irq_disable(struct radeon_device *rdev);
static void r600_pcie_gen2_enable(struct radeon_device *rdev);
extern int evergreen_rlc_resume(struct radeon_device *rdev);
extern void rv770_set_clk_bypass_mode(struct radeon_device *rdev);

/*
 * Indirect registers accessor
 */
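/* These registers sit behind an INDEX/DATA register pair; the spinlock
 * keeps the index write and the matching data access atomic with respect
 * to other users of the same pair.
 */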
u32 r600_rcu_rreg(struct radeon_device *rdev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&rdev->rcu_idx_lock, flags);
	WREG32(R600_RCU_INDEX, ((reg) & 0x1fff));
	r = RREG32(R600_RCU_DATA);
	spin_unlock_irqrestore(&rdev->rcu_idx_lock, flags);
	return r;
}

void r600_rcu_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&rdev->rcu_idx_lock, flags);
	WREG32(R600_RCU_INDEX, ((reg) & 0x1fff));
	WREG32(R600_RCU_DATA, (v));
	spin_unlock_irqrestore(&rdev->rcu_idx_lock, flags);
}

u32 r600_uvd_ctx_rreg(struct radeon_device *rdev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&rdev->uvd_idx_lock, flags);
	WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff));
	r = RREG32(R600_UVD_CTX_DATA);
	spin_unlock_irqrestore(&rdev->uvd_idx_lock, flags);
	return r;
}

void r600_uvd_ctx_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&rdev->uvd_idx_lock, flags);
	WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff));
	WREG32(R600_UVD_CTX_DATA, (v));
	spin_unlock_irqrestore(&rdev->uvd_idx_lock, flags);
}

/**
 * r600_get_allowed_info_register - fetch the register for the info ioctl
 *
 * @rdev: radeon_device pointer
 * @reg: register offset in bytes
 * @val: register value
 *
 * Returns 0 for success or -EINVAL for an invalid register
 *
 */
int r600_get_allowed_info_register(struct radeon_device *rdev,
				   u32 reg, u32 *val)
{
	switch (reg) {
	case GRBM_STATUS:
	case GRBM_STATUS2:
	case R_000E50_SRBM_STATUS:
	case DMA_STATUS_REG:
	case UVD_STATUS:
		*val = RREG32(reg);
		return 0;
	default:
		return -EINVAL;
	}
}

/**
 * r600_get_xclk - get the xclk
 *
 * @rdev: radeon_device pointer
 *
 * Returns the reference clock used by the gfx engine
 * (r6xx, IGPs, APUs).
 */
u32 r600_get_xclk(struct radeon_device *rdev)
{
	return rdev->clock.spll.reference_freq;
}

int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
{
	unsigned fb_div = 0, ref_div, vclk_div = 0, dclk_div = 0;
	int r;

	/* bypass vclk and dclk with bclk */
	WREG32_P(CG_UPLL_FUNC_CNTL_2,
		 VCLK_SRC_SEL(1) | DCLK_SRC_SEL(1),
		 ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));

	/* assert BYPASS_EN, deassert UPLL_RESET, UPLL_SLEEP and UPLL_CTLREQ */
	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~(
		 UPLL_RESET_MASK | UPLL_SLEEP_MASK | UPLL_CTLREQ_MASK));

	if (rdev->family >= CHIP_RS780)
		WREG32_P(GFX_MACRO_BYPASS_CNTL, UPLL_BYPASS_CNTL,
			 ~UPLL_BYPASS_CNTL);

	if (!vclk || !dclk) {
		/* keep the Bypass mode, put PLL to sleep */
		WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
		return 0;
	}
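
	/* pick the UPLL reference divider to suit the SPLL reference clock
	 * (clock values are in 10 kHz units, so 10000 is a 100 MHz reference) */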
	if (rdev->clock.spll.reference_freq == 10000)
		ref_div = 34;
	else
		ref_div = 4;

	r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk, 50000, 160000,
					  ref_div + 1, 0xFFF, 2, 30, ~0,
					  &fb_div, &vclk_div, &dclk_div);
	if (r)
		return r;

	if (rdev->family >= CHIP_RV670 && rdev->family < CHIP_RS780)
		fb_div >>= 1;
	else
		fb_div |= 1;

	r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
	if (r)
		return r;

	/* assert PLL_RESET */
	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_RESET_MASK, ~UPLL_RESET_MASK);

	/* For RS780 we have to choose ref clk */
	if (rdev->family >= CHIP_RS780)
		WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_REFCLK_SRC_SEL_MASK,
			 ~UPLL_REFCLK_SRC_SEL_MASK);

	/* set the required fb, ref and post divider values */
	WREG32_P(CG_UPLL_FUNC_CNTL,
		 UPLL_FB_DIV(fb_div) |
		 UPLL_REF_DIV(ref_div),
		 ~(UPLL_FB_DIV_MASK | UPLL_REF_DIV_MASK));
	WREG32_P(CG_UPLL_FUNC_CNTL_2,
		 UPLL_SW_HILEN(vclk_div >> 1) |
		 UPLL_SW_LOLEN((vclk_div >> 1) + (vclk_div & 1)) |
		 UPLL_SW_HILEN2(dclk_div >> 1) |
		 UPLL_SW_LOLEN2((dclk_div >> 1) + (dclk_div & 1)) |
		 UPLL_DIVEN_MASK | UPLL_DIVEN2_MASK,
		 ~UPLL_SW_MASK);

	/* give the PLL some time to settle */
	mdelay(15);

	/* deassert PLL_RESET */
	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);

	mdelay(15);

	/* deassert BYPASS EN */
	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK);

	if (rdev->family >= CHIP_RS780)
		WREG32_P(GFX_MACRO_BYPASS_CNTL, 0, ~UPLL_BYPASS_CNTL);

	r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
	if (r)
		return r;

	/* switch VCLK and DCLK selection */
	WREG32_P(CG_UPLL_FUNC_CNTL_2,
		 VCLK_SRC_SEL(2) | DCLK_SRC_SEL(2),
		 ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));

	mdelay(100);

	return 0;
}

void dce3_program_fmt(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
	int bpc = 0;
	u32 tmp = 0;
	enum radeon_connector_dither dither = RADEON_FMT_DITHER_DISABLE;

	if (connector) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		bpc = radeon_get_monitor_bpc(connector);
		dither = radeon_connector->dither;
	}

	/* LVDS FMT is set up by atom */
	if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
		return;

	/* not needed for analog */
	if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
	    (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
		return;

	if (bpc == 0)
		return;

	switch (bpc) {
	case 6:
		if (dither == RADEON_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= FMT_SPATIAL_DITHER_EN;
		else
			tmp |= FMT_TRUNCATE_EN;
		break;
	case 8:
		if (dither == RADEON_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH);
		else
			tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH);
		break;
	case 10:
	default:
		/* not needed */
		break;
	}

	WREG32(FMT_BIT_DEPTH_CONTROL + radeon_crtc->crtc_offset, tmp);
}

/* get temperature in millidegrees */
int rv6xx_get_temp(struct radeon_device *rdev)
{
	u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >>
		ASIC_T_SHIFT;
	int actual_temp = temp & 0xff;
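
	/* the raw reading is a signed 9-bit value; sign-extend readings
	 * below zero */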
	if (temp & 0x100)
		actual_temp -= 256;

	return actual_temp * 1000;
}

void r600_pm_get_dynpm_state(struct radeon_device *rdev)
{
	int i;

	rdev->pm.dynpm_can_upclock = true;
	rdev->pm.dynpm_can_downclock = true;

	/* power state array is low to high, default is first */
	if ((rdev->flags & RADEON_IS_IGP) || (rdev->family == CHIP_R600)) {
		int min_power_state_index = 0;

		if (rdev->pm.num_power_states > 2)
			min_power_state_index = 1;

		switch (rdev->pm.dynpm_planned_action) {
		case DYNPM_ACTION_MINIMUM:
			rdev->pm.requested_power_state_index = min_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_downclock = false;
			break;
		case DYNPM_ACTION_DOWNCLOCK:
			if (rdev->pm.current_power_state_index == min_power_state_index) {
				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
				rdev->pm.dynpm_can_downclock = false;
			} else {
				if (rdev->pm.active_crtc_count > 1) {
					for (i = 0; i < rdev->pm.num_power_states; i++) {
						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
							continue;
						else if (i >= rdev->pm.current_power_state_index) {
							rdev->pm.requested_power_state_index =
								rdev->pm.current_power_state_index;
							break;
						} else {
							rdev->pm.requested_power_state_index = i;
							break;
						}
					}
				} else {
					if (rdev->pm.current_power_state_index == 0)
						rdev->pm.requested_power_state_index =
							rdev->pm.num_power_states - 1;
					else
						rdev->pm.requested_power_state_index =
							rdev->pm.current_power_state_index - 1;
				}
			}
			rdev->pm.requested_clock_mode_index = 0;
			/* don't use the power state if crtcs are active and no display flag is set */
			if ((rdev->pm.active_crtc_count > 0) &&
			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
			     clock_info[rdev->pm.requested_clock_mode_index].flags &
			     RADEON_PM_MODE_NO_DISPLAY)) {
				rdev->pm.requested_power_state_index++;
			}
			break;
		case DYNPM_ACTION_UPCLOCK:
			if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
				rdev->pm.dynpm_can_upclock = false;
			} else {
				if (rdev->pm.active_crtc_count > 1) {
					for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
							continue;
						else if (i <= rdev->pm.current_power_state_index) {
							rdev->pm.requested_power_state_index =
								rdev->pm.current_power_state_index;
							break;
						} else {
							rdev->pm.requested_power_state_index = i;
							break;
						}
					}
				} else
					rdev->pm.requested_power_state_index =
						rdev->pm.current_power_state_index + 1;
			}
			rdev->pm.requested_clock_mode_index = 0;
			break;
		case DYNPM_ACTION_DEFAULT:
			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_upclock = false;
			break;
		case DYNPM_ACTION_NONE:
		default:
			DRM_ERROR("Requested mode for undefined action\n");
			return;
		}
	} else {
		/* XXX select a power state based on AC/DC, single/dualhead, etc. */
		/* for now just select the first power state and switch between clock modes */
		/* power state array is low to high, default is first (0) */
		if (rdev->pm.active_crtc_count > 1) {
			rdev->pm.requested_power_state_index = -1;
			/* start at 1 as we don't want the default mode */
			for (i = 1; i < rdev->pm.num_power_states; i++) {
				if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
					continue;
				else if ((rdev->pm.power_state[i].type == POWER_STATE_TYPE_PERFORMANCE) ||
					 (rdev->pm.power_state[i].type == POWER_STATE_TYPE_BATTERY)) {
					rdev->pm.requested_power_state_index = i;
					break;
				}
			}
			/* if nothing selected, grab the default state. */
			if (rdev->pm.requested_power_state_index == -1)
				rdev->pm.requested_power_state_index = 0;
		} else
			rdev->pm.requested_power_state_index = 1;

		switch (rdev->pm.dynpm_planned_action) {
		case DYNPM_ACTION_MINIMUM:
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_downclock = false;
			break;
		case DYNPM_ACTION_DOWNCLOCK:
			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
				if (rdev->pm.current_clock_mode_index == 0) {
					rdev->pm.requested_clock_mode_index = 0;
					rdev->pm.dynpm_can_downclock = false;
				} else
					rdev->pm.requested_clock_mode_index =
						rdev->pm.current_clock_mode_index - 1;
			} else {
				rdev->pm.requested_clock_mode_index = 0;
				rdev->pm.dynpm_can_downclock = false;
			}
			/* don't use the power state if crtcs are active and no display flag is set */
			if ((rdev->pm.active_crtc_count > 0) &&
			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
			     clock_info[rdev->pm.requested_clock_mode_index].flags &
			     RADEON_PM_MODE_NO_DISPLAY)) {
				rdev->pm.requested_clock_mode_index++;
			}
			break;
		case DYNPM_ACTION_UPCLOCK:
			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
				if (rdev->pm.current_clock_mode_index ==
				    (rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1)) {
					rdev->pm.requested_clock_mode_index = rdev->pm.current_clock_mode_index;
					rdev->pm.dynpm_can_upclock = false;
				} else
					rdev->pm.requested_clock_mode_index =
						rdev->pm.current_clock_mode_index + 1;
			} else {
				rdev->pm.requested_clock_mode_index =
					rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1;
				rdev->pm.dynpm_can_upclock = false;
			}
			break;
		case DYNPM_ACTION_DEFAULT:
			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_upclock = false;
			break;
		case DYNPM_ACTION_NONE:
		default:
			DRM_ERROR("Requested mode for undefined action\n");
			return;
		}
	}

	DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n",
		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
		  clock_info[rdev->pm.requested_clock_mode_index].sclk,
		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
		  clock_info[rdev->pm.requested_clock_mode_index].mclk,
		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
		  pcie_lanes);
}

void rs780_pm_init_profile(struct radeon_device *rdev)
{
	if (rdev->pm.num_power_states == 2) {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else if (rdev->pm.num_power_states == 3) {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 3;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 3;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	}
}

void r600_pm_init_profile(struct radeon_device *rdev)
{
	int idx;

	if (rdev->family == CHIP_R600) {
		/* XXX */
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else {
		if (rdev->pm.num_power_states < 4) {
			/* default */
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
			/* low sh */
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
			/* mid sh */
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
			/* high sh */
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
			/* low mh */
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
			/* mid mh */
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
			/* high mh */
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
		} else {
			/* default */
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
			/* low sh */
			if (rdev->flags & RADEON_IS_MOBILITY)
				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
			else
				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
			/* mid sh */
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
			/* high sh */
			idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
			/* low mh */
			if (rdev->flags & RADEON_IS_MOBILITY)
				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
			else
				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
			/* mid mh */
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
			/* high mh */
			idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
		}
	}
}

void r600_pm_misc(struct radeon_device *rdev)
{
	int req_ps_idx = rdev->pm.requested_power_state_index;
	int req_cm_idx = rdev->pm.requested_clock_mode_index;
	struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
	struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;

	if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
		/* 0xff01 is a flag rather than an actual voltage */
		if (voltage->voltage == 0xff01)
			return;
		if (voltage->voltage != rdev->pm.current_vddc) {
			radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
			rdev->pm.current_vddc = voltage->voltage;
			DRM_DEBUG_DRIVER("Setting: v: %d\n", voltage->voltage);
		}
	}
}

bool r600_gui_idle(struct radeon_device *rdev)
{
	if (RREG32(GRBM_STATUS) & GUI_ACTIVE)
		return false;
	else
		return true;
}

/* hpd for digital panel detect/disconnect */
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
	bool connected = false;

	if (ASIC_IS_DCE3(rdev)) {
		switch (hpd) {
		case RADEON_HPD_1:
			if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_2:
			if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_3:
			if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_4:
			if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
			/* DCE 3.2 */
		case RADEON_HPD_5:
			if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_6:
			if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		default:
			break;
		}
	} else {
		switch (hpd) {
		case RADEON_HPD_1:
			if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_2:
			if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_3:
			if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		default:
			break;
		}
	}
	return connected;
}
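
/* program the HPD interrupt polarity so that an interrupt is raised when
 * the sense state changes from its current value */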
void r600_hpd_set_polarity(struct radeon_device *rdev,
			   enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = r600_hpd_sense(rdev, hpd);

	if (ASIC_IS_DCE3(rdev)) {
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HPD1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HPD2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HPD3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD3_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_4:
			tmp = RREG32(DC_HPD4_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD4_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_5:
			tmp = RREG32(DC_HPD5_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
			break;
			/* DCE 3.2 */
		case RADEON_HPD_6:
			tmp = RREG32(DC_HPD6_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	} else {
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	}
}

void r600_hpd_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;
	unsigned enable = 0;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);

		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
			/* don't try to enable hpd on eDP or LVDS, to avoid breaking the
			 * aux dp channel on imacs and to help (but not completely fix)
			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
			 */
			continue;
		}
		if (ASIC_IS_DCE3(rdev)) {
			u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
			if (ASIC_IS_DCE32(rdev))
				tmp |= DC_HPDx_EN;

			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, tmp);
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, tmp);
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, tmp);
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, tmp);
				break;
				/* DCE 3.2 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, tmp);
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, tmp);
				break;
			default:
				break;
			}
		} else {
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				break;
			default:
				break;
			}
		}
		enable |= 1 << radeon_connector->hpd.hpd;
		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
	}
//	radeon_irq_kms_enable_hpd(rdev, enable);
}

void r600_hpd_fini(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;
	unsigned disable = 0;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		if (ASIC_IS_DCE3(rdev)) {
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, 0);
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, 0);
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, 0);
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, 0);
				break;
				/* DCE 3.2 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, 0);
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, 0);
				break;
			default:
				break;
			}
		} else {
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
				break;
			default:
				break;
			}
		}
		disable |= 1 << radeon_connector->hpd.hpd;
	}
//	radeon_irq_kms_disable_hpd(rdev, disable);
}

/*
 * R600 PCIE GART
 */
void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	/* flush hdp cache so updates hit vram */
	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
	    !(rdev->flags & RADEON_IS_AGP)) {
		void __iomem *ptr = (void *)rdev->gart.ptr;
		u32 tmp;

		/* r7xx hw bug.  write to HDP_DEBUG1 followed by fb read
		 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL.
		 * This seems to cause problems on some AGP cards. Just use the old
		 * method for them.
		 */
		WREG32(HDP_DEBUG1, 0);
		tmp = readl((void __iomem *)ptr);
	} else
		WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
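		/* a response of 2 means the flush failed; any other
		 * non-zero response means it completed */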
		if (tmp == 2) {
			printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
			return;
		}
		if (tmp) {
			return;
		}
		udelay(1);
	}
}

int r600_pcie_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.robj) {
		WARN(1, "R600 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r)
		return r;
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
	return radeon_gart_table_vram_alloc(rdev);
}

static int r600_pcie_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r, i;

	if (rdev->gart.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_UVD_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_UVD_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(rdev->dummy_page.addr >> 12));
	for (i = 1; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	r600_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}

static void r600_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Disable all tables */
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	/* Disable L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup L1 TLB control */
	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_UVD_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_UVD_CNTL, tmp);
	radeon_gart_table_vram_unpin(rdev);
}

static void r600_pcie_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	r600_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
}

static void r600_agp_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
}

int r600_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(R_000E50_SRBM_STATUS) & 0x3F00;
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -1;
}
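
/* RS780/RS880 MC registers are reached through the MC_INDEX/MC_DATA pair */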
1269
 
3764 Serge 1270
uint32_t rs780_mc_rreg(struct radeon_device *rdev, uint32_t reg)
1271
{
5078 serge 1272
	unsigned long flags;
3764 Serge 1273
	uint32_t r;
1274
 
5078 serge 1275
	spin_lock_irqsave(&rdev->mc_idx_lock, flags);
3764 Serge 1276
	WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg));
1277
	r = RREG32(R_0028FC_MC_DATA);
1278
	WREG32(R_0028F8_MC_INDEX, ~C_0028F8_MC_IND_ADDR);
5078 serge 1279
	spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
3764 Serge 1280
	return r;
1281
}
1282
 
1283
void rs780_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
1284
{
5078 serge 1285
	unsigned long flags;
1286
 
1287
	spin_lock_irqsave(&rdev->mc_idx_lock, flags);
3764 Serge 1288
	WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg) |
1289
		S_0028F8_MC_IND_WR_EN(1));
1290
	WREG32(R_0028FC_MC_DATA, v);
1291
	WREG32(R_0028F8_MC_INDEX, 0x7F);
5078 serge 1292
	spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
3764 Serge 1293
}
1294
 
1221 serge 1295
static void r600_mc_program(struct radeon_device *rdev)
1128 serge 1296
{
1221 serge 1297
	struct rv515_mc_save save;
1298
	u32 tmp;
1299
	int i, j;
1300
 
1301
	/* Initialize HDP */
1302
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
1303
		WREG32((0x2c14 + j), 0x00000000);
1304
		WREG32((0x2c18 + j), 0x00000000);
1305
		WREG32((0x2c1c + j), 0x00000000);
1306
		WREG32((0x2c20 + j), 0x00000000);
1307
		WREG32((0x2c24 + j), 0x00000000);
1308
	}
1309
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
1310
 
1311
	rv515_mc_stop(rdev, &save);
1312
	if (r600_mc_wait_for_idle(rdev)) {
1313
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
1314
	}
1315
	/* Lockout access through VGA aperture (doesn't exist before R600) */
1316
	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
1317
	/* Update configuration */
1318
	if (rdev->flags & RADEON_IS_AGP) {
1319
		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
1320
			/* VRAM before AGP */
1321
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
1322
				rdev->mc.vram_start >> 12);
1323
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
1324
				rdev->mc.gtt_end >> 12);
1325
		} else {
1326
			/* VRAM after AGP */
1327
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
1328
				rdev->mc.gtt_start >> 12);
1329
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
1330
				rdev->mc.vram_end >> 12);
1331
		}
1332
	} else {
1333
		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
1334
		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
1335
	}
2997 Serge 1336
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
1221 serge 1337
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
1338
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
1339
	WREG32(MC_VM_FB_LOCATION, tmp);
1340
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
1341
	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
1963 serge 1342
	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
1221 serge 1343
	if (rdev->flags & RADEON_IS_AGP) {
1344
		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
1345
		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
1346
		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
1347
	} else {
1348
		WREG32(MC_VM_AGP_BASE, 0);
1349
		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
1350
		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
1351
	}
1352
	if (r600_mc_wait_for_idle(rdev)) {
1353
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
1354
	}
1355
	rv515_mc_resume(rdev, &save);
1356
	/* we need to own VRAM, so turn off the VGA renderer here
1357
	 * to stop it overwriting our objects */
1358
	rv515_vga_render_disable(rdev);
1128 serge 1359
}
1360
 
1430 serge 1361
/**
1362
 * r600_vram_gtt_location - try to find VRAM & GTT location
1363
 * @rdev: radeon device structure holding all necessary informations
1364
 * @mc: memory controller structure holding memory informations
1365
 *
1366
 * Function will place try to place VRAM at same place as in CPU (PCI)
1367
 * address space as some GPU seems to have issue when we reprogram at
1368
 * different address space.
1369
 *
1370
 * If there is not enough space to fit the unvisible VRAM after the
1371
 * aperture then we limit the VRAM size to the aperture.
1372
 *
1373
 * If we are using AGP then place VRAM adjacent to AGP aperture are we need
1374
 * them to be in one from GPU point of view so that we can program GPU to
1375
 * catch access outside them (weird GPU policy see ??).
1376
 *
1377
 * This function will never fails, worst case are limiting VRAM or GTT.
1378
 *
1379
 * Note: GTT start, end, size should be initialized before calling this
1380
 * function on AGP platform.
1381
 */
1963 serge 1382
static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
1430 serge 1383
{
1384
	u64 size_bf, size_af;
1385
 
1386
	if (mc->mc_vram_size > 0xE0000000) {
1387
		/* leave room for at least 512M GTT */
1388
		dev_warn(rdev->dev, "limiting VRAM\n");
1389
		mc->real_vram_size = 0xE0000000;
1390
		mc->mc_vram_size = 0xE0000000;
1391
	}
1392
	if (rdev->flags & RADEON_IS_AGP) {
1393
		size_bf = mc->gtt_start;
3764 Serge 1394
		size_af = mc->mc_mask - mc->gtt_end;
1430 serge 1395
		if (size_bf > size_af) {
1396
			if (mc->mc_vram_size > size_bf) {
1397
				dev_warn(rdev->dev, "limiting VRAM\n");
1398
				mc->real_vram_size = size_bf;
1399
				mc->mc_vram_size = size_bf;
1400
			}
1401
			mc->vram_start = mc->gtt_start - mc->mc_vram_size;
1402
		} else {
1403
			if (mc->mc_vram_size > size_af) {
1404
				dev_warn(rdev->dev, "limiting VRAM\n");
1405
				mc->real_vram_size = size_af;
1406
				mc->mc_vram_size = size_af;
1407
			}
2997 Serge 1408
			mc->vram_start = mc->gtt_end + 1;
1430 serge 1409
		}
1410
		mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
1411
		dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
1412
				mc->mc_vram_size >> 20, mc->vram_start,
1413
				mc->vram_end, mc->real_vram_size >> 20);
1414
	} else {
1415
		u64 base = 0;
1963 serge 1416
		if (rdev->flags & RADEON_IS_IGP) {
1417
			base = RREG32(MC_VM_FB_LOCATION) & 0xFFFF;
1418
			base <<= 24;
1419
		}
1430 serge 1420
		radeon_vram_location(rdev, &rdev->mc, base);
1963 serge 1421
		rdev->mc.gtt_base_align = 0;
1430 serge 1422
		radeon_gtt_location(rdev, mc);
1423
	}
1424
}
1425
 
2997 Serge 1426
static int r600_mc_init(struct radeon_device *rdev)
1128 serge 1427
{
1221 serge 1428
	u32 tmp;
1268 serge 1429
	int chansize, numchan;
3764 Serge 1430
	uint32_t h_addr, l_addr;
1431
	unsigned long long k8_addr;
1128 serge 1432
 
1221 serge 1433
	/* Get VRAM information */
1128 serge 1434
	rdev->mc.vram_is_ddr = true;
1221 serge 1435
	tmp = RREG32(RAMCFG);
1436
	if (tmp & CHANSIZE_OVERRIDE) {
1128 serge 1437
		chansize = 16;
1221 serge 1438
	} else if (tmp & CHANSIZE_MASK) {
1128 serge 1439
		chansize = 64;
1440
	} else {
1441
		chansize = 32;
1442
	}
1268 serge 1443
	tmp = RREG32(CHMAP);
1444
	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
1445
	case 0:
1446
	default:
1447
		numchan = 1;
1448
		break;
1449
	case 1:
1450
		numchan = 2;
1451
		break;
1452
	case 2:
1453
		numchan = 4;
1454
		break;
1455
	case 3:
1456
		numchan = 8;
1457
		break;
1128 serge 1458
	}
1268 serge 1459
	rdev->mc.vram_width = numchan * chansize;
1221 serge 1460
	/* Could the aperture size report 0? */
1963 serge 1461
	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
1462
	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
1221 serge 1463
	/* Setup GPU memory space */
1464
	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
1465
	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
1430 serge 1466
	rdev->mc.visible_vram_size = rdev->mc.aper_size;
1467
	r600_vram_gtt_location(rdev, &rdev->mc);
1963 serge 1468
 
1469
	if (rdev->flags & RADEON_IS_IGP) {
1470
		rs690_pm_info(rdev);
1403 serge 1471
		rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
3764 Serge 1472
 
1473
		if (rdev->family == CHIP_RS780 || rdev->family == CHIP_RS880) {
1474
			/* Use K8 direct mapping for fast fb access. */
1475
			rdev->fastfb_working = false;
1476
			h_addr = G_000012_K8_ADDR_EXT(RREG32_MC(R_000012_MC_MISC_UMA_CNTL));
1477
			l_addr = RREG32_MC(R_000011_K8_FB_LOCATION);
1478
			k8_addr = ((unsigned long long)h_addr) << 32 | l_addr;
1479
#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
1480
			if (k8_addr + rdev->mc.visible_vram_size < 0x100000000ULL)
1481
#endif
1482
			{
1483
				/* FastFB shall be used with UMA memory. Here it is
1484
				 * simply disabled when sideport memory is present.
1485
				 */
1486
				if (rdev->mc.igp_sideport_enabled == false && radeon_fastfb == 1) {
1487
					DRM_INFO("Direct mapping: aper base at 0x%llx, replaced by direct mapping base 0x%llx.\n",
1488
						(unsigned long long)rdev->mc.aper_base, k8_addr);
1489
					rdev->mc.aper_base = (resource_size_t)k8_addr;
1490
					rdev->fastfb_working = true;
6104 serge 1491
				}
3764 Serge 1492
			}
7146 serge 1493
		}
3764 Serge 1494
	}
1495
 
1963 serge 1496
	radeon_update_bandwidth_info(rdev);
1221 serge 1497
	return 0;
1128 serge 1498
}
1499
 
2997 Serge 1500
int r600_vram_scratch_init(struct radeon_device *rdev)
1501
{
1502
	int r;
1503
 
1504
	if (rdev->vram_scratch.robj == NULL) {
1505
		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
1506
				     PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
5271 serge 1507
				     0, NULL, NULL, &rdev->vram_scratch.robj);
2997 Serge 1508
		if (r) {
1509
			return r;
1510
		}
1511
	}
1512
 
1513
	r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
1514
	if (unlikely(r != 0))
1515
		return r;
1516
	r = radeon_bo_pin(rdev->vram_scratch.robj,
1517
			  RADEON_GEM_DOMAIN_VRAM, &rdev->vram_scratch.gpu_addr);
1518
	if (r) {
1519
		radeon_bo_unreserve(rdev->vram_scratch.robj);
1520
		return r;
1521
	}
1522
	r = radeon_bo_kmap(rdev->vram_scratch.robj,
1523
				(void **)&rdev->vram_scratch.ptr);
1524
	if (r)
1525
		radeon_bo_unpin(rdev->vram_scratch.robj);
1526
	radeon_bo_unreserve(rdev->vram_scratch.robj);
1527
 
1528
	return r;
1529
}
3764 Serge 1530
 
1531
void r600_vram_scratch_fini(struct radeon_device *rdev)
1128 serge 1532
{
3764 Serge 1533
	int r;
1128 serge 1534
 
3764 Serge 1535
	if (rdev->vram_scratch.robj == NULL) {
3192 Serge 1536
		return;
3764 Serge 1537
	}
1538
	r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
1539
	if (likely(r == 0)) {
1540
		radeon_bo_kunmap(rdev->vram_scratch.robj);
1541
		radeon_bo_unpin(rdev->vram_scratch.robj);
1542
		radeon_bo_unreserve(rdev->vram_scratch.robj);
1543
	}
1544
	radeon_bo_unref(&rdev->vram_scratch.robj);
1545
}
1963 serge 1546
 
3764 Serge 1547
void r600_set_bios_scratch_engine_hung(struct radeon_device *rdev, bool hung)
1548
{
1549
	u32 tmp = RREG32(R600_BIOS_3_SCRATCH);
1550
 
1551
	if (hung)
1552
		tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
1553
	else
1554
		tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;
1555
 
1556
	WREG32(R600_BIOS_3_SCRATCH, tmp);
1557
}
1558
 
1559
static void r600_print_gpu_status_regs(struct radeon_device *rdev)
1560
{
3192 Serge 1561
	dev_info(rdev->dev, "  R_008010_GRBM_STATUS      = 0x%08X\n",
6104 serge 1562
		 RREG32(R_008010_GRBM_STATUS));
3192 Serge 1563
	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2     = 0x%08X\n",
6104 serge 1564
		 RREG32(R_008014_GRBM_STATUS2));
3192 Serge 1565
	dev_info(rdev->dev, "  R_000E50_SRBM_STATUS      = 0x%08X\n",
6104 serge 1566
		 RREG32(R_000E50_SRBM_STATUS));
2997 Serge 1567
	dev_info(rdev->dev, "  R_008674_CP_STALLED_STAT1 = 0x%08X\n",
6104 serge 1568
		 RREG32(CP_STALLED_STAT1));
2997 Serge 1569
	dev_info(rdev->dev, "  R_008678_CP_STALLED_STAT2 = 0x%08X\n",
6104 serge 1570
		 RREG32(CP_STALLED_STAT2));
2997 Serge 1571
	dev_info(rdev->dev, "  R_00867C_CP_BUSY_STAT     = 0x%08X\n",
6104 serge 1572
		 RREG32(CP_BUSY_STAT));
2997 Serge 1573
	dev_info(rdev->dev, "  R_008680_CP_STAT          = 0x%08X\n",
6104 serge 1574
		 RREG32(CP_STAT));
3764 Serge 1575
	dev_info(rdev->dev, "  R_00D034_DMA_STATUS_REG   = 0x%08X\n",
1576
		RREG32(DMA_STATUS_REG));
1577
}
3192 Serge 1578
 
3764 Serge 1579
static bool r600_is_display_hung(struct radeon_device *rdev)
1580
{
1581
	u32 crtc_hung = 0;
1582
	u32 crtc_status[2];
1583
	u32 i, j, tmp;
1584
 
1585
	for (i = 0; i < rdev->num_crtc; i++) {
1586
		if (RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]) & AVIVO_CRTC_EN) {
1587
			crtc_status[i] = RREG32(AVIVO_D1CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
1588
			crtc_hung |= (1 << i);
1589
		}
1590
	}
1591
 
1592
	for (j = 0; j < 10; j++) {
1593
		for (i = 0; i < rdev->num_crtc; i++) {
1594
			if (crtc_hung & (1 << i)) {
1595
				tmp = RREG32(AVIVO_D1CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
1596
				if (tmp != crtc_status[i])
1597
					crtc_hung &= ~(1 << i);
1598
			}
1599
		}
1600
		if (crtc_hung == 0)
1601
			return false;
1602
		udelay(100);
1603
	}
1604
 
1605
	return true;
1606
}
1607
 
5078 serge 1608
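/**
 * r600_gpu_check_soft_reset - check which GPU blocks are busy or hung
 * @rdev: radeon_device pointer
 *
 * Samples GRBM_STATUS, DMA_STATUS_REG and SRBM_STATUS and translates the
 * busy bits into a mask of RADEON_RESET_* flags (GFX, CP, DMA, RLC, IH,
 * SEM, GRBM, VMC, DISPLAY).  A busy MC is deliberately dropped from the
 * mask below, since it is most likely just busy rather than hung.
 * Returns 0 if the GPU looks idle.
 */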
u32 r600_gpu_check_soft_reset(struct radeon_device *rdev)
3764 Serge 1609
{
1610
	u32 reset_mask = 0;
1611
	u32 tmp;
1612
 
1613
	/* GRBM_STATUS */
1614
	tmp = RREG32(R_008010_GRBM_STATUS);
1615
	if (rdev->family >= CHIP_RV770) {
1616
		if (G_008010_PA_BUSY(tmp) | G_008010_SC_BUSY(tmp) |
1617
		    G_008010_SH_BUSY(tmp) | G_008010_SX_BUSY(tmp) |
1618
		    G_008010_TA_BUSY(tmp) | G_008010_VGT_BUSY(tmp) |
1619
		    G_008010_DB03_BUSY(tmp) | G_008010_CB03_BUSY(tmp) |
1620
		    G_008010_SPI03_BUSY(tmp) | G_008010_VGT_BUSY_NO_DMA(tmp))
1621
			reset_mask |= RADEON_RESET_GFX;
1622
	} else {
1623
		if (G_008010_PA_BUSY(tmp) | G_008010_SC_BUSY(tmp) |
1624
		    G_008010_SH_BUSY(tmp) | G_008010_SX_BUSY(tmp) |
1625
		    G_008010_TA03_BUSY(tmp) | G_008010_VGT_BUSY(tmp) |
1626
		    G_008010_DB03_BUSY(tmp) | G_008010_CB03_BUSY(tmp) |
1627
		    G_008010_SPI03_BUSY(tmp) | G_008010_VGT_BUSY_NO_DMA(tmp))
1628
			reset_mask |= RADEON_RESET_GFX;
1629
	}
1630
 
1631
	if (G_008010_CF_RQ_PENDING(tmp) | G_008010_PF_RQ_PENDING(tmp) |
1632
	    G_008010_CP_BUSY(tmp) | G_008010_CP_COHERENCY_BUSY(tmp))
1633
		reset_mask |= RADEON_RESET_CP;
1634
 
1635
	if (G_008010_GRBM_EE_BUSY(tmp))
1636
		reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;
1637
 
1638
	/* DMA_STATUS_REG */
1639
	tmp = RREG32(DMA_STATUS_REG);
1640
	if (!(tmp & DMA_IDLE))
1641
		reset_mask |= RADEON_RESET_DMA;
1642
 
1643
	/* SRBM_STATUS */
1644
	tmp = RREG32(R_000E50_SRBM_STATUS);
1645
	if (G_000E50_RLC_RQ_PENDING(tmp) | G_000E50_RLC_BUSY(tmp))
1646
		reset_mask |= RADEON_RESET_RLC;
1647
 
1648
	if (G_000E50_IH_BUSY(tmp))
1649
		reset_mask |= RADEON_RESET_IH;
1650
 
1651
	if (G_000E50_SEM_BUSY(tmp))
1652
		reset_mask |= RADEON_RESET_SEM;
1653
 
1654
	if (G_000E50_GRBM_RQ_PENDING(tmp))
1655
		reset_mask |= RADEON_RESET_GRBM;
1656
 
1657
	if (G_000E50_VMC_BUSY(tmp))
1658
		reset_mask |= RADEON_RESET_VMC;
1659
 
1660
	if (G_000E50_MCB_BUSY(tmp) | G_000E50_MCDZ_BUSY(tmp) |
1661
	    G_000E50_MCDY_BUSY(tmp) | G_000E50_MCDX_BUSY(tmp) |
1662
	    G_000E50_MCDW_BUSY(tmp))
1663
		reset_mask |= RADEON_RESET_MC;
1664
 
1665
	if (r600_is_display_hung(rdev))
1666
		reset_mask |= RADEON_RESET_DISPLAY;
1667
 
1668
	/* Skip MC reset as it's most likely not hung, just busy */
1669
	if (reset_mask & RADEON_RESET_MC) {
1670
		DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
1671
		reset_mask &= ~RADEON_RESET_MC;
1672
	}
1673
 
1674
	return reset_mask;
1675
}
1676
 
1677
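/**
 * r600_gpu_soft_reset - soft reset the blocks selected in @reset_mask
 * @rdev: radeon_device pointer
 * @reset_mask: mask of RADEON_RESET_* blocks to reset
 *
 * Halts the CP, RLC and (if requested) DMA, stops MC access, then pulses
 * the matching bits in GRBM_SOFT_RESET/SRBM_SOFT_RESET: set, read back,
 * wait ~50us, clear.  Status registers are printed before and after so a
 * hang leaves a trace in the log.
 */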
static void r600_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
1678
{
1679
	struct rv515_mc_save save;
1680
	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
1681
	u32 tmp;
1682
 
1683
	if (reset_mask == 0)
1684
		return;
1685
 
1686
	dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
1687
 
1688
	r600_print_gpu_status_regs(rdev);
1689
 
1221 serge 1690
	/* Disable CP parsing/prefetching */
3764 Serge 1691
	if (rdev->family >= CHIP_RV770)
1692
		WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1) | S_0086D8_CP_PFP_HALT(1));
1693
	else
6104 serge 1694
		WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
3192 Serge 1695
 
3764 Serge 1696
	/* disable the RLC */
1697
	WREG32(RLC_CNTL, 0);
1698
 
1699
	if (reset_mask & RADEON_RESET_DMA) {
1700
		/* Disable DMA */
1701
		tmp = RREG32(DMA_RB_CNTL);
1702
		tmp &= ~DMA_RB_ENABLE;
1703
		WREG32(DMA_RB_CNTL, tmp);
1704
	}
1705
 
1706
	mdelay(50);
1707
 
1708
	rv515_mc_stop(rdev, &save);
1709
	if (r600_mc_wait_for_idle(rdev)) {
1710
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
1711
	}
1712
 
1713
	if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) {
1714
		if (rdev->family >= CHIP_RV770)
1715
			grbm_soft_reset |= S_008020_SOFT_RESET_DB(1) |
1716
				S_008020_SOFT_RESET_CB(1) |
1717
				S_008020_SOFT_RESET_PA(1) |
1718
				S_008020_SOFT_RESET_SC(1) |
1719
				S_008020_SOFT_RESET_SPI(1) |
1720
				S_008020_SOFT_RESET_SX(1) |
1721
				S_008020_SOFT_RESET_SH(1) |
1722
				S_008020_SOFT_RESET_TC(1) |
1723
				S_008020_SOFT_RESET_TA(1) |
1724
				S_008020_SOFT_RESET_VC(1) |
1725
				S_008020_SOFT_RESET_VGT(1);
1726
		else
1727
			grbm_soft_reset |= S_008020_SOFT_RESET_CR(1) |
6104 serge 1728
				S_008020_SOFT_RESET_DB(1) |
1729
				S_008020_SOFT_RESET_CB(1) |
1730
				S_008020_SOFT_RESET_PA(1) |
1731
				S_008020_SOFT_RESET_SC(1) |
1732
				S_008020_SOFT_RESET_SMX(1) |
1733
				S_008020_SOFT_RESET_SPI(1) |
1734
				S_008020_SOFT_RESET_SX(1) |
1735
				S_008020_SOFT_RESET_SH(1) |
1736
				S_008020_SOFT_RESET_TC(1) |
1737
				S_008020_SOFT_RESET_TA(1) |
1738
				S_008020_SOFT_RESET_VC(1) |
1739
				S_008020_SOFT_RESET_VGT(1);
1221 serge 1740
	}
3764 Serge 1741
 
1742
	if (reset_mask & RADEON_RESET_CP) {
1743
		grbm_soft_reset |= S_008020_SOFT_RESET_CP(1) |
1744
			S_008020_SOFT_RESET_VGT(1);
1745
 
1746
		srbm_soft_reset |= S_000E60_SOFT_RESET_GRBM(1);
1747
	}
1748
 
1749
	if (reset_mask & RADEON_RESET_DMA) {
1750
		if (rdev->family >= CHIP_RV770)
1751
			srbm_soft_reset |= RV770_SOFT_RESET_DMA;
1752
		else
1753
			srbm_soft_reset |= SOFT_RESET_DMA;
1754
	}
1755
 
1756
	if (reset_mask & RADEON_RESET_RLC)
1757
		srbm_soft_reset |= S_000E60_SOFT_RESET_RLC(1);
1758
 
1759
	if (reset_mask & RADEON_RESET_SEM)
1760
		srbm_soft_reset |= S_000E60_SOFT_RESET_SEM(1);
1761
 
1762
	if (reset_mask & RADEON_RESET_IH)
1763
		srbm_soft_reset |= S_000E60_SOFT_RESET_IH(1);
1764
 
1765
	if (reset_mask & RADEON_RESET_GRBM)
1766
		srbm_soft_reset |= S_000E60_SOFT_RESET_GRBM(1);
1767
 
1768
	if (!(rdev->flags & RADEON_IS_IGP)) {
1769
		if (reset_mask & RADEON_RESET_MC)
1770
			srbm_soft_reset |= S_000E60_SOFT_RESET_MC(1);
1771
	}
1772
 
1773
	if (reset_mask & RADEON_RESET_VMC)
1774
		srbm_soft_reset |= S_000E60_SOFT_RESET_VMC(1);
1775
 
1776
	if (grbm_soft_reset) {
1777
		tmp = RREG32(R_008020_GRBM_SOFT_RESET);
1778
		tmp |= grbm_soft_reset;
6104 serge 1779
		dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
1780
		WREG32(R_008020_GRBM_SOFT_RESET, tmp);
3764 Serge 1781
		tmp = RREG32(R_008020_GRBM_SOFT_RESET);
3192 Serge 1782
 
3764 Serge 1783
		udelay(50);
3192 Serge 1784
 
3764 Serge 1785
		tmp &= ~grbm_soft_reset;
1786
		WREG32(R_008020_GRBM_SOFT_RESET, tmp);
1787
		tmp = RREG32(R_008020_GRBM_SOFT_RESET);
1788
	}
3192 Serge 1789
 
3764 Serge 1790
	if (srbm_soft_reset) {
1791
		tmp = RREG32(SRBM_SOFT_RESET);
1792
		tmp |= srbm_soft_reset;
1793
		dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
1794
		WREG32(SRBM_SOFT_RESET, tmp);
1795
		tmp = RREG32(SRBM_SOFT_RESET);
3192 Serge 1796
 
3764 Serge 1797
		udelay(50);
3192 Serge 1798
 
3764 Serge 1799
		tmp &= ~srbm_soft_reset;
1800
		WREG32(SRBM_SOFT_RESET, tmp);
1801
		tmp = RREG32(SRBM_SOFT_RESET);
1802
	}
3192 Serge 1803
 
3764 Serge 1804
	/* Wait a little for things to settle down */
1805
	mdelay(1);
3192 Serge 1806
 
3764 Serge 1807
	rv515_mc_resume(rdev, &save);
3192 Serge 1808
	udelay(50);
1809
 
3764 Serge 1810
	r600_print_gpu_status_regs(rdev);
3192 Serge 1811
}
1812
 
5078 serge 1813
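/**
 * r600_gpu_pci_config_reset - reset the asic via the PCI config space
 * @rdev: radeon_device pointer
 *
 * A heavier hammer than the soft reset: halts CP/RLC/DMA, switches the
 * clocks to bypass mode on r7xx, disables bus mastering and MC access,
 * then triggers a reset through PCI configuration space and waits for
 * CONFIG_MEMSIZE to become readable again as the sign the asic is alive.
 */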
static void r600_gpu_pci_config_reset(struct radeon_device *rdev)
1814
{
1815
	struct rv515_mc_save save;
1816
	u32 tmp, i;
1817
 
1818
	dev_info(rdev->dev, "GPU pci config reset\n");
1819
 
1820
	/* disable dpm? */
1821
 
1822
	/* Disable CP parsing/prefetching */
1823
	if (rdev->family >= CHIP_RV770)
1824
		WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1) | S_0086D8_CP_PFP_HALT(1));
1825
	else
1826
		WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
1827
 
1828
	/* disable the RLC */
1829
	WREG32(RLC_CNTL, 0);
1830
 
1831
	/* Disable DMA */
1832
	tmp = RREG32(DMA_RB_CNTL);
1833
	tmp &= ~DMA_RB_ENABLE;
1834
	WREG32(DMA_RB_CNTL, tmp);
1835
 
1836
	mdelay(50);
1837
 
1838
	/* set mclk/sclk to bypass */
1839
	if (rdev->family >= CHIP_RV770)
1840
		rv770_set_clk_bypass_mode(rdev);
1841
	/* disable BM */
1842
	pci_clear_master(rdev->pdev);
1843
	/* disable mem access */
1844
	rv515_mc_stop(rdev, &save);
1845
	if (r600_mc_wait_for_idle(rdev)) {
1846
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
1847
	}
1848
 
1849
	/* BIF reset workaround.  Not sure if this is needed on 6xx */
1850
	tmp = RREG32(BUS_CNTL);
1851
	tmp |= VGA_COHE_SPEC_TIMER_DIS;
1852
	WREG32(BUS_CNTL, tmp);
1853
 
1854
	tmp = RREG32(BIF_SCRATCH0);
1855
 
1856
	/* reset */
1857
	radeon_pci_config_reset(rdev);
1858
	mdelay(1);
1859
 
1860
	/* BIF reset workaround.  Not sure if this is needed on 6xx */
1861
	tmp = SOFT_RESET_BIF;
1862
	WREG32(SRBM_SOFT_RESET, tmp);
1863
	mdelay(1);
1864
	WREG32(SRBM_SOFT_RESET, 0);
1865
 
1866
	/* wait for asic to come out of reset */
1867
	for (i = 0; i < rdev->usec_timeout; i++) {
1868
		if (RREG32(CONFIG_MEMSIZE) != 0xffffffff)
1869
			break;
1870
		udelay(1);
1871
	}
1872
}
1873
 
3764 Serge 1874
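/**
 * r600_asic_reset - try to reset a hung GPU
 * @rdev: radeon_device pointer
 *
 * Escalating strategy: compute the mask of hung blocks, attempt a soft
 * reset, re-check, and fall back to the full PCI config reset only if
 * blocks are still hung and radeon_hard_reset is set.  The BIOS scratch
 * "engine hung" flag is raised while the GPU is considered hung and
 * cleared once a re-check comes back clean.
 */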
int r600_asic_reset(struct radeon_device *rdev)
3192 Serge 1875
{
3764 Serge 1876
	u32 reset_mask;
3192 Serge 1877
 
3764 Serge 1878
	reset_mask = r600_gpu_check_soft_reset(rdev);
3192 Serge 1879
 
3764 Serge 1880
	if (reset_mask)
1881
		r600_set_bios_scratch_engine_hung(rdev, true);
3192 Serge 1882
 
5078 serge 1883
	/* try soft reset */
3764 Serge 1884
	r600_gpu_soft_reset(rdev, reset_mask);
3192 Serge 1885
 
3764 Serge 1886
	reset_mask = r600_gpu_check_soft_reset(rdev);
3192 Serge 1887
 
5078 serge 1888
	/* try pci config reset */
1889
	if (reset_mask && radeon_hard_reset)
1890
		r600_gpu_pci_config_reset(rdev);
1891
 
1892
	reset_mask = r600_gpu_check_soft_reset(rdev);
1893
 
3764 Serge 1894
	if (!reset_mask)
1895
		r600_set_bios_scratch_engine_hung(rdev, false);
3192 Serge 1896
 
1221 serge 1897
	return 0;
1128 serge 1898
}
1899
 
3764 Serge 1900
/**
1901
 * r600_gfx_is_lockup - Check if the GFX engine is locked up
1902
 *
1903
 * @rdev: radeon_device pointer
1904
 * @ring: radeon_ring structure holding ring information
1905
 *
1906
 * Check if the GFX engine is locked up.
1907
 * Returns true if the engine appears to be locked up, false if not.
1908
 */
1909
bool r600_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
1221 serge 1910
{
3764 Serge 1911
	u32 reset_mask = r600_gpu_check_soft_reset(rdev);
1963 serge 1912
 
3764 Serge 1913
	if (!(reset_mask & (RADEON_RESET_GFX |
1914
			    RADEON_RESET_COMPUTE |
1915
			    RADEON_RESET_CP))) {
5078 serge 1916
		radeon_ring_lockup_update(rdev, ring);
1963 serge 1917
		return false;
1918
	}
2997 Serge 1919
	return radeon_ring_test_lockup(rdev, ring);
1963 serge 1920
}
1921
 
2997 Serge 1922
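/**
 * r6xx_remap_render_backend - build the backend map around harvested RBs
 * @rdev: radeon_device pointer
 * @tiling_pipe_num: number of tiling pipes
 * @max_rb_num: maximum render backends for this asic
 * @total_max_rb_num: maximum render backends for the generation
 * @disabled_rb_mask: mask of fused-off render backends
 *
 * Distributes the rendering pipes across the render backends that are
 * actually present, giving each enabled RB pipe_rb_ratio slots and
 * spreading any remainder, then packs the result in the field layout
 * expected by GB_TILING_CONFIG (2 bits per RB on r6xx/r7xx, 4 on eg+).
 */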
u32 r6xx_remap_render_backend(struct radeon_device *rdev,
1923
			      u32 tiling_pipe_num,
1924
			      u32 max_rb_num,
1925
			      u32 total_max_rb_num,
1926
			      u32 disabled_rb_mask)
1221 serge 1927
{
2997 Serge 1928
	u32 rendering_pipe_num, rb_num_width, req_rb_num;
3764 Serge 1929
	u32 pipe_rb_ratio, pipe_rb_remain, tmp;
2997 Serge 1930
	u32 data = 0, mask = 1 << (max_rb_num - 1);
1931
	unsigned i, j;
1221 serge 1932
 
2997 Serge 1933
	/* mask out the RBs that don't exist on that asic */
3764 Serge 1934
	tmp = disabled_rb_mask | ((0xff << max_rb_num) & 0xff);
1935
	/* make sure at least one RB is available */
1936
	if ((tmp & 0xff) != 0xff)
1937
		disabled_rb_mask = tmp;
1221 serge 1938
 
2997 Serge 1939
	rendering_pipe_num = 1 << tiling_pipe_num;
1940
	req_rb_num = total_max_rb_num - r600_count_pipe_bits(disabled_rb_mask);
1941
	BUG_ON(rendering_pipe_num < req_rb_num);
1221 serge 1942
 
2997 Serge 1943
	pipe_rb_ratio = rendering_pipe_num / req_rb_num;
1944
	pipe_rb_remain = rendering_pipe_num - pipe_rb_ratio * req_rb_num;
1221 serge 1945
 
2997 Serge 1946
	if (rdev->family <= CHIP_RV740) {
1947
		/* r6xx/r7xx */
1948
		rb_num_width = 2;
1949
	} else {
1950
		/* eg+ */
1951
		rb_num_width = 4;
6104 serge 1952
	}
1221 serge 1953
 
2997 Serge 1954
	for (i = 0; i < max_rb_num; i++) {
1955
		if (!(mask & disabled_rb_mask)) {
1956
			for (j = 0; j < pipe_rb_ratio; j++) {
1957
				data <<= rb_num_width;
1958
				data |= max_rb_num - i - 1;
6104 serge 1959
			}
2997 Serge 1960
			if (pipe_rb_remain) {
1961
				data <<= rb_num_width;
1962
				data |= max_rb_num - i - 1;
1963
				pipe_rb_remain--;
6104 serge 1964
			}
1965
		}
2997 Serge 1966
		mask >>= 1;
1967
	}
1221 serge 1968
 
2997 Serge 1969
	return data;
1221 serge 1970
}
1971
 
1972
int r600_count_pipe_bits(uint32_t val)
1973
{
3192 Serge 1974
	return hweight32(val);
1221 serge 1975
}
1976
 
2997 Serge 1977
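/**
 * r600_gpu_init - set up the 3D engine defaults
 * @rdev: radeon_device pointer
 *
 * Fills rdev->config.r600 with the per-family pipe/SIMD/GPR/thread
 * limits, derives the tiling configuration from RAMCFG, remaps the
 * render backends around harvested units and programs the SQ resource
 * split plus assorted state defaults that the 2D/3D drivers later
 * adjust as needed.
 */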
static void r600_gpu_init(struct radeon_device *rdev)
1221 serge 1978
{
1979
	u32 tiling_config;
1980
	u32 ramcfg;
1430 serge 1981
	u32 cc_gc_shader_pipe_config;
1221 serge 1982
	u32 tmp;
1983
	int i, j;
1984
	u32 sq_config;
1985
	u32 sq_gpr_resource_mgmt_1 = 0;
1986
	u32 sq_gpr_resource_mgmt_2 = 0;
1987
	u32 sq_thread_resource_mgmt = 0;
1988
	u32 sq_stack_resource_mgmt_1 = 0;
1989
	u32 sq_stack_resource_mgmt_2 = 0;
2997 Serge 1990
	u32 disabled_rb_mask;
1221 serge 1991
 
2997 Serge 1992
	rdev->config.r600.tiling_group_size = 256;
1221 serge 1993
	switch (rdev->family) {
1994
	case CHIP_R600:
1995
		rdev->config.r600.max_pipes = 4;
1996
		rdev->config.r600.max_tile_pipes = 8;
1997
		rdev->config.r600.max_simds = 4;
1998
		rdev->config.r600.max_backends = 4;
1999
		rdev->config.r600.max_gprs = 256;
2000
		rdev->config.r600.max_threads = 192;
2001
		rdev->config.r600.max_stack_entries = 256;
2002
		rdev->config.r600.max_hw_contexts = 8;
2003
		rdev->config.r600.max_gs_threads = 16;
2004
		rdev->config.r600.sx_max_export_size = 128;
2005
		rdev->config.r600.sx_max_export_pos_size = 16;
2006
		rdev->config.r600.sx_max_export_smx_size = 128;
2007
		rdev->config.r600.sq_num_cf_insts = 2;
2008
		break;
2009
	case CHIP_RV630:
2010
	case CHIP_RV635:
2011
		rdev->config.r600.max_pipes = 2;
2012
		rdev->config.r600.max_tile_pipes = 2;
2013
		rdev->config.r600.max_simds = 3;
2014
		rdev->config.r600.max_backends = 1;
2015
		rdev->config.r600.max_gprs = 128;
2016
		rdev->config.r600.max_threads = 192;
2017
		rdev->config.r600.max_stack_entries = 128;
2018
		rdev->config.r600.max_hw_contexts = 8;
2019
		rdev->config.r600.max_gs_threads = 4;
2020
		rdev->config.r600.sx_max_export_size = 128;
2021
		rdev->config.r600.sx_max_export_pos_size = 16;
2022
		rdev->config.r600.sx_max_export_smx_size = 128;
2023
		rdev->config.r600.sq_num_cf_insts = 2;
2024
		break;
2025
	case CHIP_RV610:
2026
	case CHIP_RV620:
2027
	case CHIP_RS780:
2028
	case CHIP_RS880:
2029
		rdev->config.r600.max_pipes = 1;
2030
		rdev->config.r600.max_tile_pipes = 1;
2031
		rdev->config.r600.max_simds = 2;
2032
		rdev->config.r600.max_backends = 1;
2033
		rdev->config.r600.max_gprs = 128;
2034
		rdev->config.r600.max_threads = 192;
2035
		rdev->config.r600.max_stack_entries = 128;
2036
		rdev->config.r600.max_hw_contexts = 4;
2037
		rdev->config.r600.max_gs_threads = 4;
2038
		rdev->config.r600.sx_max_export_size = 128;
2039
		rdev->config.r600.sx_max_export_pos_size = 16;
2040
		rdev->config.r600.sx_max_export_smx_size = 128;
2041
		rdev->config.r600.sq_num_cf_insts = 1;
2042
		break;
2043
	case CHIP_RV670:
2044
		rdev->config.r600.max_pipes = 4;
2045
		rdev->config.r600.max_tile_pipes = 4;
2046
		rdev->config.r600.max_simds = 4;
2047
		rdev->config.r600.max_backends = 4;
2048
		rdev->config.r600.max_gprs = 192;
2049
		rdev->config.r600.max_threads = 192;
2050
		rdev->config.r600.max_stack_entries = 256;
2051
		rdev->config.r600.max_hw_contexts = 8;
2052
		rdev->config.r600.max_gs_threads = 16;
2053
		rdev->config.r600.sx_max_export_size = 128;
2054
		rdev->config.r600.sx_max_export_pos_size = 16;
2055
		rdev->config.r600.sx_max_export_smx_size = 128;
2056
		rdev->config.r600.sq_num_cf_insts = 2;
2057
		break;
2058
	default:
2059
		break;
2060
	}
2061
 
2062
	/* Initialize HDP */
2063
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
2064
		WREG32((0x2c14 + j), 0x00000000);
2065
		WREG32((0x2c18 + j), 0x00000000);
2066
		WREG32((0x2c1c + j), 0x00000000);
2067
		WREG32((0x2c20 + j), 0x00000000);
2068
		WREG32((0x2c24 + j), 0x00000000);
2069
	}
2070
 
2071
	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
2072
 
2073
	/* Setup tiling */
2074
	tiling_config = 0;
2075
	ramcfg = RREG32(RAMCFG);
2076
	switch (rdev->config.r600.max_tile_pipes) {
2077
	case 1:
2078
		tiling_config |= PIPE_TILING(0);
2079
		break;
2080
	case 2:
2081
		tiling_config |= PIPE_TILING(1);
2082
		break;
2083
	case 4:
2084
		tiling_config |= PIPE_TILING(2);
2085
		break;
2086
	case 8:
2087
		tiling_config |= PIPE_TILING(3);
2088
		break;
2089
	default:
2090
		break;
2091
	}
1430 serge 2092
	rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes;
2093
	rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
1221 serge 2094
	tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
1963 serge 2095
	tiling_config |= GROUP_SIZE((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
2997 Serge 2096
 
1221 serge 2097
	tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
2098
	if (tmp > 3) {
2099
		tiling_config |= ROW_TILING(3);
2100
		tiling_config |= SAMPLE_SPLIT(3);
2101
	} else {
2102
		tiling_config |= ROW_TILING(tmp);
2103
		tiling_config |= SAMPLE_SPLIT(tmp);
2104
	}
2105
	tiling_config |= BANK_SWAPS(1);
1430 serge 2106
 
2997 Serge 2107
	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0x00ffff00;
5078 serge 2108
	tmp = rdev->config.r600.max_simds -
2997 Serge 2109
		r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R6XX_MAX_SIMDS_MASK);
5078 serge 2110
	rdev->config.r600.active_simds = tmp;
1430 serge 2111
 
2997 Serge 2112
	disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R6XX_MAX_BACKENDS_MASK;
5078 serge 2113
	tmp = 0;
2114
	for (i = 0; i < rdev->config.r600.max_backends; i++)
2115
		tmp |= (1 << i);
2116
	/* if all the backends are disabled, fix it up here */
2117
	if ((disabled_rb_mask & tmp) == tmp) {
2118
		for (i = 0; i < rdev->config.r600.max_backends; i++)
2119
			disabled_rb_mask &= ~(1 << i);
2120
	}
2997 Serge 2121
	tmp = (tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
2122
	tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.r600.max_backends,
2123
					R6XX_MAX_BACKENDS, disabled_rb_mask);
2124
	tiling_config |= tmp << 16;
2125
	rdev->config.r600.backend_map = tmp;
2126
 
1963 serge 2127
	rdev->config.r600.tile_config = tiling_config;
1221 serge 2128
	WREG32(GB_TILING_CONFIG, tiling_config);
2129
	WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
2130
	WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);
3192 Serge 2131
	WREG32(DMA_TILING_CONFIG, tiling_config & 0xffff);
1221 serge 2132
 
1430 serge 2133
	tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
1221 serge 2134
	WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
2135
	WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);
2136
 
2137
	/* Setup some CP states */
2138
	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | ROQ_IB2_START(0x2b)));
2139
	WREG32(CP_MEQ_THRESHOLDS, (MEQ_END(0x40) | ROQ_END(0x40)));
2140
 
2141
	WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | SYNC_GRADIENT |
2142
			     SYNC_WALKER | SYNC_ALIGNER));
2143
	/* Setup various GPU states */
2144
	if (rdev->family == CHIP_RV670)
2145
		WREG32(ARB_GDEC_RD_CNTL, 0x00000021);
2146
 
2147
	tmp = RREG32(SX_DEBUG_1);
2148
	tmp |= SMX_EVENT_RELEASE;
2149
	if ((rdev->family > CHIP_R600))
2150
		tmp |= ENABLE_NEW_SMX_ADDRESS;
2151
	WREG32(SX_DEBUG_1, tmp);
2152
 
2153
	if (((rdev->family) == CHIP_R600) ||
2154
	    ((rdev->family) == CHIP_RV630) ||
2155
	    ((rdev->family) == CHIP_RV610) ||
2156
	    ((rdev->family) == CHIP_RV620) ||
1268 serge 2157
	    ((rdev->family) == CHIP_RS780) ||
2158
	    ((rdev->family) == CHIP_RS880)) {
1221 serge 2159
		WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE);
2160
	} else {
2161
		WREG32(DB_DEBUG, 0);
2162
	}
2163
	WREG32(DB_WATERMARKS, (DEPTH_FREE(4) | DEPTH_CACHELINE_FREE(16) |
2164
			       DEPTH_FLUSH(16) | DEPTH_PENDING_FREE(4)));
2165
 
2166
	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
2167
	WREG32(VGT_NUM_INSTANCES, 0);
2168
 
2169
	WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
2170
	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(0));
2171
 
2172
	tmp = RREG32(SQ_MS_FIFO_SIZES);
2173
	if (((rdev->family) == CHIP_RV610) ||
2174
	    ((rdev->family) == CHIP_RV620) ||
1268 serge 2175
	    ((rdev->family) == CHIP_RS780) ||
2176
	    ((rdev->family) == CHIP_RS880)) {
1221 serge 2177
		tmp = (CACHE_FIFO_SIZE(0xa) |
2178
		       FETCH_FIFO_HIWATER(0xa) |
2179
		       DONE_FIFO_HIWATER(0xe0) |
2180
		       ALU_UPDATE_FIFO_HIWATER(0x8));
2181
	} else if (((rdev->family) == CHIP_R600) ||
2182
		   ((rdev->family) == CHIP_RV630)) {
2183
		tmp &= ~DONE_FIFO_HIWATER(0xff);
2184
		tmp |= DONE_FIFO_HIWATER(0x4);
2185
	}
2186
	WREG32(SQ_MS_FIFO_SIZES, tmp);
2187
 
2188
	/* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
2189
	 * should be adjusted as needed by the 2D/3D drivers.  This just sets default values
2190
	 */
2191
	sq_config = RREG32(SQ_CONFIG);
2192
	sq_config &= ~(PS_PRIO(3) |
2193
		       VS_PRIO(3) |
2194
		       GS_PRIO(3) |
2195
		       ES_PRIO(3));
2196
	sq_config |= (DX9_CONSTS |
2197
		      VC_ENABLE |
2198
		      PS_PRIO(0) |
2199
		      VS_PRIO(1) |
2200
		      GS_PRIO(2) |
2201
		      ES_PRIO(3));
2202
 
2203
	if ((rdev->family) == CHIP_R600) {
2204
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(124) |
2205
					  NUM_VS_GPRS(124) |
2206
					  NUM_CLAUSE_TEMP_GPRS(4));
2207
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(0) |
2208
					  NUM_ES_GPRS(0));
2209
		sq_thread_resource_mgmt = (NUM_PS_THREADS(136) |
2210
					   NUM_VS_THREADS(48) |
2211
					   NUM_GS_THREADS(4) |
2212
					   NUM_ES_THREADS(4));
2213
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(128) |
2214
					    NUM_VS_STACK_ENTRIES(128));
2215
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(0) |
2216
					    NUM_ES_STACK_ENTRIES(0));
2217
	} else if (((rdev->family) == CHIP_RV610) ||
2218
		   ((rdev->family) == CHIP_RV620) ||
1268 serge 2219
		   ((rdev->family) == CHIP_RS780) ||
2220
		   ((rdev->family) == CHIP_RS880)) {
1221 serge 2221
		/* no vertex cache */
2222
		sq_config &= ~VC_ENABLE;
2223
 
2224
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
2225
					  NUM_VS_GPRS(44) |
2226
					  NUM_CLAUSE_TEMP_GPRS(2));
2227
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
2228
					  NUM_ES_GPRS(17));
2229
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
2230
					   NUM_VS_THREADS(78) |
2231
					   NUM_GS_THREADS(4) |
2232
					   NUM_ES_THREADS(31));
2233
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
2234
					    NUM_VS_STACK_ENTRIES(40));
2235
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
2236
					    NUM_ES_STACK_ENTRIES(16));
2237
	} else if (((rdev->family) == CHIP_RV630) ||
2238
		   ((rdev->family) == CHIP_RV635)) {
2239
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
2240
					  NUM_VS_GPRS(44) |
2241
					  NUM_CLAUSE_TEMP_GPRS(2));
2242
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(18) |
2243
					  NUM_ES_GPRS(18));
2244
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
2245
					   NUM_VS_THREADS(78) |
2246
					   NUM_GS_THREADS(4) |
2247
					   NUM_ES_THREADS(31));
2248
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
2249
					    NUM_VS_STACK_ENTRIES(40));
2250
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
2251
					    NUM_ES_STACK_ENTRIES(16));
2252
	} else if ((rdev->family) == CHIP_RV670) {
2253
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
2254
					  NUM_VS_GPRS(44) |
2255
					  NUM_CLAUSE_TEMP_GPRS(2));
2256
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
2257
					  NUM_ES_GPRS(17));
2258
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
2259
					   NUM_VS_THREADS(78) |
2260
					   NUM_GS_THREADS(4) |
2261
					   NUM_ES_THREADS(31));
2262
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(64) |
2263
					    NUM_VS_STACK_ENTRIES(64));
2264
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(64) |
2265
					    NUM_ES_STACK_ENTRIES(64));
2266
	}
2267
 
2268
	WREG32(SQ_CONFIG, sq_config);
2269
	WREG32(SQ_GPR_RESOURCE_MGMT_1,  sq_gpr_resource_mgmt_1);
2270
	WREG32(SQ_GPR_RESOURCE_MGMT_2,  sq_gpr_resource_mgmt_2);
2271
	WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
2272
	WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
2273
	WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);
2274
 
2275
	if (((rdev->family) == CHIP_RV610) ||
2276
	    ((rdev->family) == CHIP_RV620) ||
1268 serge 2277
	    ((rdev->family) == CHIP_RS780) ||
2278
	    ((rdev->family) == CHIP_RS880)) {
1221 serge 2279
		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY));
2280
	} else {
2281
		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC));
2282
	}
2283
 
2284
	/* More default values. 2D/3D driver should adjust as needed */
2285
	WREG32(PA_SC_AA_SAMPLE_LOCS_2S, (S0_X(0xc) | S0_Y(0x4) |
2286
					 S1_X(0x4) | S1_Y(0xc)));
2287
	WREG32(PA_SC_AA_SAMPLE_LOCS_4S, (S0_X(0xe) | S0_Y(0xe) |
2288
					 S1_X(0x2) | S1_Y(0x2) |
2289
					 S2_X(0xa) | S2_Y(0x6) |
2290
					 S3_X(0x6) | S3_Y(0xa)));
2291
	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD0, (S0_X(0xe) | S0_Y(0xb) |
2292
					     S1_X(0x4) | S1_Y(0xc) |
2293
					     S2_X(0x1) | S2_Y(0x6) |
2294
					     S3_X(0xa) | S3_Y(0xe)));
2295
	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD1, (S4_X(0x6) | S4_Y(0x1) |
2296
					     S5_X(0x0) | S5_Y(0x0) |
2297
					     S6_X(0xb) | S6_Y(0x4) |
2298
					     S7_X(0x7) | S7_Y(0x8)));
2299
 
2300
	WREG32(VGT_STRMOUT_EN, 0);
2301
	tmp = rdev->config.r600.max_pipes * 16;
2302
	switch (rdev->family) {
2303
	case CHIP_RV610:
1268 serge 2304
	case CHIP_RV620:
1221 serge 2305
	case CHIP_RS780:
1268 serge 2306
	case CHIP_RS880:
1221 serge 2307
		tmp += 32;
2308
		break;
2309
	case CHIP_RV670:
2310
		tmp += 128;
2311
		break;
2312
	default:
2313
		break;
2314
	}
2315
	if (tmp > 256) {
2316
		tmp = 256;
2317
	}
2318
	WREG32(VGT_ES_PER_GS, 128);
2319
	WREG32(VGT_GS_PER_ES, tmp);
2320
	WREG32(VGT_GS_PER_VS, 2);
2321
	WREG32(VGT_GS_VERTEX_REUSE, 16);
2322
 
2323
	/* more default values. 2D/3D driver should adjust as needed */
2324
	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
2325
	WREG32(VGT_STRMOUT_EN, 0);
2326
	WREG32(SX_MISC, 0);
2327
	WREG32(PA_SC_MODE_CNTL, 0);
2328
	WREG32(PA_SC_AA_CONFIG, 0);
2329
	WREG32(PA_SC_LINE_STIPPLE, 0);
2330
	WREG32(SPI_INPUT_Z, 0);
2331
	WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
2332
	WREG32(CB_COLOR7_FRAG, 0);
2333
 
2334
	/* Clear render buffer base addresses */
2335
	WREG32(CB_COLOR0_BASE, 0);
2336
	WREG32(CB_COLOR1_BASE, 0);
2337
	WREG32(CB_COLOR2_BASE, 0);
2338
	WREG32(CB_COLOR3_BASE, 0);
2339
	WREG32(CB_COLOR4_BASE, 0);
2340
	WREG32(CB_COLOR5_BASE, 0);
2341
	WREG32(CB_COLOR6_BASE, 0);
2342
	WREG32(CB_COLOR7_BASE, 0);
2343
	WREG32(CB_COLOR7_FRAG, 0);
2344
 
2345
	switch (rdev->family) {
2346
	case CHIP_RV610:
1268 serge 2347
	case CHIP_RV620:
1221 serge 2348
	case CHIP_RS780:
1268 serge 2349
	case CHIP_RS880:
1221 serge 2350
		tmp = TC_L2_SIZE(8);
2351
		break;
2352
	case CHIP_RV630:
2353
	case CHIP_RV635:
2354
		tmp = TC_L2_SIZE(4);
2355
		break;
2356
	case CHIP_R600:
2357
		tmp = TC_L2_SIZE(0) | L2_DISABLE_LATE_HIT;
2358
		break;
2359
	default:
2360
		tmp = TC_L2_SIZE(0);
2361
		break;
2362
	}
2363
	WREG32(TC_CNTL, tmp);
2364
 
2365
	tmp = RREG32(HDP_HOST_PATH_CNTL);
2366
	WREG32(HDP_HOST_PATH_CNTL, tmp);
2367
 
2368
	tmp = RREG32(ARB_POP);
2369
	tmp |= ENABLE_TC128;
2370
	WREG32(ARB_POP, tmp);
2371
 
2372
	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
2373
	WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
2374
			       NUM_CLIP_SEQ(3)));
2375
	WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095));
2997 Serge 2376
	WREG32(VC_ENHANCE, 0);
1221 serge 2377
}
2378
 
2379
 
1128 serge 2380
/*
2381
 * Indirect register accessors
2382
 */
1221 serge 2383
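/*
 * PCIE port registers are reached through an index/data pair: the
 * register offset goes into PCIE_PORT_INDEX, the payload moves through
 * PCIE_PORT_DATA, and the spinlock keeps the two-step sequence atomic.
 * A minimal usage sketch (the register name is only an example):
 *
 *	tmp = r600_pciep_rreg(rdev, PCIE_LC_SPEED_CNTL);
 *	r600_pciep_wreg(rdev, PCIE_LC_SPEED_CNTL, tmp);
 */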
u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
1128 serge 2384
{
5078 serge 2385
	unsigned long flags;
1221 serge 2386
	u32 r;
1128 serge 2387
 
5078 serge 2388
	spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
1221 serge 2389
	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
2390
	(void)RREG32(PCIE_PORT_INDEX);
2391
	r = RREG32(PCIE_PORT_DATA);
5078 serge 2392
	spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
1128 serge 2393
	return r;
2394
}
2395
 
1221 serge 2396
void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
1128 serge 2397
{
5078 serge 2398
	unsigned long flags;
2399
 
2400
	spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
1221 serge 2401
	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
2402
	(void)RREG32(PCIE_PORT_INDEX);
2403
	WREG32(PCIE_PORT_DATA, (v));
2404
	(void)RREG32(PCIE_PORT_DATA);
5078 serge 2405
	spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
1128 serge 2406
}
1221 serge 2407
 
2408
/*
2409
 * CP & Ring
2410
 */
2411
void r600_cp_stop(struct radeon_device *rdev)
2412
{
5078 serge 2413
	if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
6104 serge 2414
		radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
1221 serge 2415
	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
1963 serge 2416
	WREG32(SCRATCH_UMSK, 0);
3192 Serge 2417
	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
1221 serge 2418
}
1413 serge 2419
 
2420
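/**
 * r600_init_microcode - fetch the PFP/ME/RLC (and SMC) firmware images
 * @rdev: radeon_device pointer
 *
 * Picks the firmware names from the asic family (e.g. an RV770 requests
 * "radeon/RV770_pfp.bin") and validates each image's size against the
 * expected one.  A missing SMC image is tolerated and merely disables
 * DPM; any other failure releases everything already loaded and returns
 * the error.
 */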
int r600_init_microcode(struct radeon_device *rdev)
2421
{
2422
	const char *chip_name;
2423
	const char *rlc_chip_name;
5078 serge 2424
	const char *smc_chip_name = "RV770";
2425
	size_t pfp_req_size, me_req_size, rlc_req_size, smc_req_size = 0;
1413 serge 2426
	char fw_name[30];
2427
	int err;
2428
 
2429
	DRM_DEBUG("\n");
2430
 
2431
	switch (rdev->family) {
2432
	case CHIP_R600:
2433
		chip_name = "R600";
2434
		rlc_chip_name = "R600";
2435
		break;
2436
	case CHIP_RV610:
2437
		chip_name = "RV610";
2438
		rlc_chip_name = "R600";
2439
		break;
2440
	case CHIP_RV630:
2441
		chip_name = "RV630";
2442
		rlc_chip_name = "R600";
2443
		break;
2444
	case CHIP_RV620:
2445
		chip_name = "RV620";
2446
		rlc_chip_name = "R600";
2447
		break;
2448
	case CHIP_RV635:
2449
		chip_name = "RV635";
2450
		rlc_chip_name = "R600";
2451
		break;
2452
	case CHIP_RV670:
2453
		chip_name = "RV670";
2454
		rlc_chip_name = "R600";
2455
		break;
2456
	case CHIP_RS780:
2457
	case CHIP_RS880:
2458
		chip_name = "RS780";
2459
		rlc_chip_name = "R600";
2460
		break;
2461
	case CHIP_RV770:
2462
		chip_name = "RV770";
2463
		rlc_chip_name = "R700";
5078 serge 2464
		smc_chip_name = "RV770";
2465
		smc_req_size = ALIGN(RV770_SMC_UCODE_SIZE, 4);
1413 serge 2466
		break;
2467
	case CHIP_RV730:
2468
		chip_name = "RV730";
2469
		rlc_chip_name = "R700";
5078 serge 2470
		smc_chip_name = "RV730";
2471
		smc_req_size = ALIGN(RV730_SMC_UCODE_SIZE, 4);
1413 serge 2472
		break;
2473
	case CHIP_RV710:
2474
		chip_name = "RV710";
2475
		rlc_chip_name = "R700";
5078 serge 2476
		smc_chip_name = "RV710";
2477
		smc_req_size = ALIGN(RV710_SMC_UCODE_SIZE, 4);
1413 serge 2478
		break;
5078 serge 2479
	case CHIP_RV740:
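		/* RV740 reuses the RV730 CP/PFP microcode; only its SMC image is RV740-specific */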
2480
		chip_name = "RV730";
2481
		rlc_chip_name = "R700";
2482
		smc_chip_name = "RV740";
2483
		smc_req_size = ALIGN(RV740_SMC_UCODE_SIZE, 4);
2484
		break;
1963 serge 2485
	case CHIP_CEDAR:
2486
		chip_name = "CEDAR";
2487
		rlc_chip_name = "CEDAR";
5078 serge 2488
		smc_chip_name = "CEDAR";
2489
		smc_req_size = ALIGN(CEDAR_SMC_UCODE_SIZE, 4);
1963 serge 2490
		break;
2491
	case CHIP_REDWOOD:
2492
		chip_name = "REDWOOD";
2493
		rlc_chip_name = "REDWOOD";
5078 serge 2494
		smc_chip_name = "REDWOOD";
2495
		smc_req_size = ALIGN(REDWOOD_SMC_UCODE_SIZE, 4);
1963 serge 2496
		break;
2497
	case CHIP_JUNIPER:
2498
		chip_name = "JUNIPER";
2499
		rlc_chip_name = "JUNIPER";
5078 serge 2500
		smc_chip_name = "JUNIPER";
2501
		smc_req_size = ALIGN(JUNIPER_SMC_UCODE_SIZE, 4);
1963 serge 2502
		break;
2503
	case CHIP_CYPRESS:
2504
	case CHIP_HEMLOCK:
2505
		chip_name = "CYPRESS";
2506
		rlc_chip_name = "CYPRESS";
5078 serge 2507
		smc_chip_name = "CYPRESS";
2508
		smc_req_size = ALIGN(CYPRESS_SMC_UCODE_SIZE, 4);
1963 serge 2509
		break;
2510
	case CHIP_PALM:
2511
		chip_name = "PALM";
2512
		rlc_chip_name = "SUMO";
2513
		break;
1986 serge 2514
	case CHIP_SUMO:
2515
		chip_name = "SUMO";
2516
		rlc_chip_name = "SUMO";
2517
		break;
2518
	case CHIP_SUMO2:
2519
		chip_name = "SUMO2";
2520
		rlc_chip_name = "SUMO";
2521
		break;
1413 serge 2522
	default: BUG();
2523
	}
2524
 
1963 serge 2525
	if (rdev->family >= CHIP_CEDAR) {
2526
		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
2527
		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
2528
		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
2529
	} else if (rdev->family >= CHIP_RV770) {
1413 serge 2530
		pfp_req_size = R700_PFP_UCODE_SIZE * 4;
2531
		me_req_size = R700_PM4_UCODE_SIZE * 4;
2532
		rlc_req_size = R700_RLC_UCODE_SIZE * 4;
2533
	} else {
5078 serge 2534
		pfp_req_size = R600_PFP_UCODE_SIZE * 4;
2535
		me_req_size = R600_PM4_UCODE_SIZE * 12;
2536
		rlc_req_size = R600_RLC_UCODE_SIZE * 4;
1413 serge 2537
	}
2538
 
2539
	DRM_INFO("Loading %s Microcode\n", chip_name);
2540
 
2541
	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
5078 serge 2542
	err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
1413 serge 2543
	if (err)
2544
		goto out;
2545
	if (rdev->pfp_fw->size != pfp_req_size) {
2546
		printk(KERN_ERR
2547
		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
2548
		       rdev->pfp_fw->size, fw_name);
2549
		err = -EINVAL;
2550
		goto out;
2551
	}
2552
 
2553
	snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
5078 serge 2554
	err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
1413 serge 2555
	if (err)
2556
		goto out;
2557
	if (rdev->me_fw->size != me_req_size) {
2558
		printk(KERN_ERR
2559
		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
2560
		       rdev->me_fw->size, fw_name);
2561
		err = -EINVAL;
2562
	}
2563
 
2564
	snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
5078 serge 2565
	err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
1413 serge 2566
	if (err)
2567
		goto out;
2568
	if (rdev->rlc_fw->size != rlc_req_size) {
2569
		printk(KERN_ERR
2570
		       "r600_rlc: Bogus length %zu in firmware \"%s\"\n",
2571
		       rdev->rlc_fw->size, fw_name);
2572
		err = -EINVAL;
2573
	}
2574
 
5078 serge 2575
	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_HEMLOCK)) {
2576
		snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", smc_chip_name);
2577
		err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
2578
		if (err) {
2579
			printk(KERN_ERR
2580
			       "smc: error loading firmware \"%s\"\n",
2581
			       fw_name);
2582
			release_firmware(rdev->smc_fw);
2583
			rdev->smc_fw = NULL;
2584
			err = 0;
2585
		} else if (rdev->smc_fw->size != smc_req_size) {
2586
			printk(KERN_ERR
2587
			       "smc: Bogus length %zu in firmware \"%s\"\n",
2588
			       rdev->smc_fw->size, fw_name);
2589
			err = -EINVAL;
2590
		}
2591
	}
2592
 
1413 serge 2593
out:
2594
	if (err) {
2595
		if (err != -EINVAL)
2596
			printk(KERN_ERR
2597
			       "r600_cp: Failed to load firmware \"%s\"\n",
2598
			       fw_name);
2599
		release_firmware(rdev->pfp_fw);
2600
		rdev->pfp_fw = NULL;
2601
		release_firmware(rdev->me_fw);
2602
		rdev->me_fw = NULL;
2603
		release_firmware(rdev->rlc_fw);
2604
		rdev->rlc_fw = NULL;
5078 serge 2605
		release_firmware(rdev->smc_fw);
2606
		rdev->smc_fw = NULL;
1413 serge 2607
	}
2608
	return err;
2609
}
2610
 
5078 serge 2611
u32 r600_gfx_get_rptr(struct radeon_device *rdev,
2612
		      struct radeon_ring *ring)
2613
{
2614
	u32 rptr;
2615
 
2616
	if (rdev->wb.enabled)
2617
		rptr = rdev->wb.wb[ring->rptr_offs/4];
2618
	else
2619
		rptr = RREG32(R600_CP_RB_RPTR);
2620
 
2621
	return rptr;
2622
}
2623
 
2624
u32 r600_gfx_get_wptr(struct radeon_device *rdev,
2625
		      struct radeon_ring *ring)
2626
{
2627
	u32 wptr;
2628
 
2629
	wptr = RREG32(R600_CP_RB_WPTR);
2630
 
2631
	return wptr;
2632
}
2633
 
2634
void r600_gfx_set_wptr(struct radeon_device *rdev,
2635
		       struct radeon_ring *ring)
2636
{
2637
	WREG32(R600_CP_RB_WPTR, ring->wptr);
2638
	(void)RREG32(R600_CP_RB_WPTR);
2639
}
2640
 
1413 serge 2641
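/**
 * r600_cp_load_microcode - upload the CP microcode to the asic
 * @rdev: radeon_device pointer
 *
 * Halts and soft-resets the CP, then streams the big-endian ME and PFP
 * images in through the auto-incrementing CP_ME_RAM_WADDR/CP_ME_RAM_DATA
 * and CP_PFP_UCODE_ADDR/CP_PFP_UCODE_DATA ports, one dword per write,
 * finally rewinding the address registers to 0.
 */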
static int r600_cp_load_microcode(struct radeon_device *rdev)
2642
{
2643
	const __be32 *fw_data;
2644
	int i;
2645
 
2646
	if (!rdev->me_fw || !rdev->pfp_fw)
2647
		return -EINVAL;
2648
 
2649
	r600_cp_stop(rdev);
2650
 
1963 serge 2651
	WREG32(CP_RB_CNTL,
2652
#ifdef __BIG_ENDIAN
2653
	       BUF_SWAP_32BIT |
2654
#endif
2655
	       RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
1413 serge 2656
 
2657
	/* Reset cp */
2658
	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
2659
	RREG32(GRBM_SOFT_RESET);
2660
	mdelay(15);
2661
	WREG32(GRBM_SOFT_RESET, 0);
2662
 
2663
	WREG32(CP_ME_RAM_WADDR, 0);
2664
 
2665
	fw_data = (const __be32 *)rdev->me_fw->data;
2666
	WREG32(CP_ME_RAM_WADDR, 0);
5078 serge 2667
	for (i = 0; i < R600_PM4_UCODE_SIZE * 3; i++)
1413 serge 2668
		WREG32(CP_ME_RAM_DATA,
2669
		       be32_to_cpup(fw_data++));
2670
 
2671
	fw_data = (const __be32 *)rdev->pfp_fw->data;
2672
	WREG32(CP_PFP_UCODE_ADDR, 0);
5078 serge 2673
	for (i = 0; i < R600_PFP_UCODE_SIZE; i++)
1413 serge 2674
		WREG32(CP_PFP_UCODE_DATA,
2675
		       be32_to_cpup(fw_data++));
2676
 
2677
	WREG32(CP_PFP_UCODE_ADDR, 0);
2678
	WREG32(CP_ME_RAM_WADDR, 0);
2679
	WREG32(CP_ME_RAM_RADDR, 0);
2680
	return 0;
2681
}
2682
 
1221 serge 2683
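/**
 * r600_cp_start - initialize the micro engine
 * @rdev: radeon_device pointer
 *
 * Emits the PACKET3_ME_INITIALIZE sequence on the gfx ring (the hw
 * context count differs between r6xx and r7xx) and then un-halts the ME
 * via CP_ME_CNTL.  Must run after the microcode is loaded and before
 * any other packets are submitted.
 */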
int r600_cp_start(struct radeon_device *rdev)
2684
{
2997 Serge 2685
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
1221 serge 2686
	int r;
2687
	uint32_t cp_me;
2688
 
2997 Serge 2689
	r = radeon_ring_lock(rdev, ring, 7);
1221 serge 2690
	if (r) {
2691
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
2692
		return r;
2693
	}
2997 Serge 2694
	radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
2695
	radeon_ring_write(ring, 0x1);
1963 serge 2696
	if (rdev->family >= CHIP_RV770) {
2997 Serge 2697
		radeon_ring_write(ring, 0x0);
2698
		radeon_ring_write(ring, rdev->config.rv770.max_hw_contexts - 1);
1963 serge 2699
	} else {
2997 Serge 2700
		radeon_ring_write(ring, 0x3);
2701
		radeon_ring_write(ring, rdev->config.r600.max_hw_contexts - 1);
1221 serge 2702
	}
2997 Serge 2703
	radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
2704
	radeon_ring_write(ring, 0);
2705
	radeon_ring_write(ring, 0);
5078 serge 2706
	radeon_ring_unlock_commit(rdev, ring, false);
1221 serge 2707
 
2708
	cp_me = 0xff;
2709
	WREG32(R_0086D8_CP_ME_CNTL, cp_me);
2710
	return 0;
2711
}
1413 serge 2712
 
2713
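/**
 * r600_cp_resume - start up the gfx ring buffer
 * @rdev: radeon_device pointer
 *
 * Programs CP_RB_CNTL with the log2-encoded ring size, resets the
 * read/write pointers, points rptr writeback at the WB buffer, sets the
 * ring base and runs a ring test.  As a worked example, a 1MB ring gives
 * rb_bufsz = order_base_2(1048576 / 8) = 17.
 */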
int r600_cp_resume(struct radeon_device *rdev)
2714
{
2997 Serge 2715
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
1413 serge 2716
	u32 tmp;
2717
	u32 rb_bufsz;
2718
	int r;
2719
 
2720
	/* Reset cp */
2721
	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
2722
	RREG32(GRBM_SOFT_RESET);
2723
	mdelay(15);
2724
	WREG32(GRBM_SOFT_RESET, 0);
2725
 
2726
	/* Set ring buffer size */
5078 serge 2727
	rb_bufsz = order_base_2(ring->ring_size / 8);
2728
	tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
1413 serge 2729
#ifdef __BIG_ENDIAN
2730
	tmp |= BUF_SWAP_32BIT;
2731
#endif
2732
	WREG32(CP_RB_CNTL, tmp);
2997 Serge 2733
	WREG32(CP_SEM_WAIT_TIMER, 0x0);
1413 serge 2734
 
2735
	/* Set the write pointer delay */
2736
	WREG32(CP_RB_WPTR_DELAY, 0);
2737
 
2738
	/* Initialize the ring buffer's read and write pointers */
2739
	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
2740
	WREG32(CP_RB_RPTR_WR, 0);
2997 Serge 2741
	ring->wptr = 0;
2742
	WREG32(CP_RB_WPTR, ring->wptr);
1963 serge 2743
 
2744
	/* set the wb address whether it's enabled or not */
2745
	WREG32(CP_RB_RPTR_ADDR,
2746
	       ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
2747
	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
2748
	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
2749
 
2750
	if (rdev->wb.enabled)
2751
		WREG32(SCRATCH_UMSK, 0xff);
2752
	else {
2753
		tmp |= RB_NO_UPDATE;
2754
		WREG32(SCRATCH_UMSK, 0);
2755
	}
2756
 
1413 serge 2757
	mdelay(1);
2758
	WREG32(CP_RB_CNTL, tmp);
2759
 
2997 Serge 2760
	WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
1413 serge 2761
	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
2762
 
2763
	r600_cp_start(rdev);
2997 Serge 2764
	ring->ready = true;
2765
	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
1413 serge 2766
	if (r) {
2997 Serge 2767
		ring->ready = false;
1413 serge 2768
		return r;
2769
	}
5078 serge 2770
 
2771
	if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
2772
		radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
2773
 
1413 serge 2774
	return 0;
2775
}
2776
 
2997 Serge 2777
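/**
 * r600_ring_init - size and prepare a ring structure
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 * @ring_size: requested ring size in bytes
 *
 * Rounds the ring size up to the next supported power-of-two budget
 * (e.g. a hypothetical 700KB request becomes 1MB) and, where supported,
 * grabs a scratch register used to save the rptr across resets.
 */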
void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size)
1221 serge 2778
{
1233 serge 2779
	u32 rb_bufsz;
2997 Serge 2780
	int r;
1221 serge 2781
 
1233 serge 2782
	/* Align ring size */
5078 serge 2783
	rb_bufsz = order_base_2(ring_size / 8);
1233 serge 2784
	ring_size = (1 << (rb_bufsz + 1)) * 4;
2997 Serge 2785
	ring->ring_size = ring_size;
2786
	ring->align_mask = 16 - 1;
2787
 
2788
	if (radeon_ring_supports_scratch_reg(rdev, ring)) {
2789
		r = radeon_scratch_get(rdev, &ring->rptr_save_reg);
2790
		if (r) {
2791
			DRM_ERROR("failed to get scratch reg for rptr save (%d).\n", r);
2792
			ring->rptr_save_reg = 0;
2793
		}
2794
	}
1233 serge 2795
}
2796
 
1963 serge 2797
void r600_cp_fini(struct radeon_device *rdev)
2798
{
2997 Serge 2799
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
1963 serge 2800
	r600_cp_stop(rdev);
2997 Serge 2801
	radeon_ring_fini(rdev, ring);
2802
	radeon_scratch_free(rdev, ring->rptr_save_reg);
1963 serge 2803
}
1233 serge 2804
 
3192 Serge 2805
/*
1233 serge 2806
 * GPU scratch registers helpers function.
2807
 */
2808
void r600_scratch_init(struct radeon_device *rdev)
2809
{
2810
	int i;
2811
 
2812
	rdev->scratch.num_reg = 7;
1963 serge 2813
	rdev->scratch.reg_base = SCRATCH_REG0;
1233 serge 2814
	for (i = 0; i < rdev->scratch.num_reg; i++) {
2815
		rdev->scratch.free[i] = true;
1963 serge 2816
		rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
1233 serge 2817
	}
2818
}
1413 serge 2819
 
2997 Serge 2820
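/**
 * r600_ring_test - basic sanity check of the gfx ring
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Writes 0xCAFEDEAD to a scratch register, emits a SET_CONFIG_REG packet
 * that stores 0xDEADBEEF to the same register, and polls until the value
 * flips or rdev->usec_timeout expires.  If the CP never executes the
 * packet the ring is considered dead and -EINVAL is returned.
 */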
int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
1413 serge 2821
{
2822
	uint32_t scratch;
2823
	uint32_t tmp = 0;
2824
	unsigned i;
2825
	int r;
2826
 
2827
	r = radeon_scratch_get(rdev, &scratch);
2828
	if (r) {
2829
		DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
2830
		return r;
2831
	}
2832
	WREG32(scratch, 0xCAFEDEAD);
2997 Serge 2833
	r = radeon_ring_lock(rdev, ring, 3);
1413 serge 2834
	if (r) {
2997 Serge 2835
		DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ring->idx, r);
1413 serge 2836
		radeon_scratch_free(rdev, scratch);
2837
		return r;
2838
	}
2997 Serge 2839
	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2840
	radeon_ring_write(ring, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
2841
	radeon_ring_write(ring, 0xDEADBEEF);
5078 serge 2842
	radeon_ring_unlock_commit(rdev, ring, false);
1413 serge 2843
	for (i = 0; i < rdev->usec_timeout; i++) {
2844
		tmp = RREG32(scratch);
2845
		if (tmp == 0xDEADBEEF)
2846
			break;
2847
		DRM_UDELAY(1);
2848
	}
2849
	if (i < rdev->usec_timeout) {
2997 Serge 2850
		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
1413 serge 2851
	} else {
2997 Serge 2852
		DRM_ERROR("radeon: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
2853
			  ring->idx, scratch, tmp);
1413 serge 2854
		r = -EINVAL;
2855
	}
2856
	radeon_scratch_free(rdev, scratch);
2857
	return r;
2858
}
1963 serge 2859
 
3192 Serge 2860
/*
2861
 * CP fences/semaphores
2862
 */
2863
 
1413 serge 2864
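/**
 * r600_fence_ring_emit - emit a fence on the gfx ring
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 *
 * Flushes the read caches over GART with a SURFACE_SYNC packet, then
 * either emits an EVENT_WRITE_EOP that writes the sequence number and
 * raises an interrupt (writeback enabled), or falls back to waiting for
 * 3D idle, writing the sequence to a scratch register and poking the CP
 * interrupt status directly.
 */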
void r600_fence_ring_emit(struct radeon_device *rdev,
2865
			  struct radeon_fence *fence)
2866
{
2997 Serge 2867
	struct radeon_ring *ring = &rdev->ring[fence->ring];
5078 serge 2868
	u32 cp_coher_cntl = PACKET3_TC_ACTION_ENA | PACKET3_VC_ACTION_ENA |
2869
		PACKET3_SH_ACTION_ENA;
2997 Serge 2870
 
5078 serge 2871
	if (rdev->family >= CHIP_RV770)
2872
		cp_coher_cntl |= PACKET3_FULL_CACHE_ENA;
2873
 
1963 serge 2874
	if (rdev->wb.use_event) {
2997 Serge 2875
		u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
2876
		/* flush read cache over gart */
2877
		radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
5078 serge 2878
		radeon_ring_write(ring, cp_coher_cntl);
2997 Serge 2879
		radeon_ring_write(ring, 0xFFFFFFFF);
2880
		radeon_ring_write(ring, 0);
2881
		radeon_ring_write(ring, 10); /* poll interval */
1963 serge 2882
		/* EVENT_WRITE_EOP - flush caches, send int */
2997 Serge 2883
		radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
2884
		radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
5078 serge 2885
		radeon_ring_write(ring, lower_32_bits(addr));
2997 Serge 2886
		radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
2887
		radeon_ring_write(ring, fence->seq);
2888
		radeon_ring_write(ring, 0);
1963 serge 2889
	} else {
2997 Serge 2890
		/* flush read cache over gart */
2891
		radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
5078 serge 2892
		radeon_ring_write(ring, cp_coher_cntl);
2997 Serge 2893
		radeon_ring_write(ring, 0xFFFFFFFF);
2894
		radeon_ring_write(ring, 0);
2895
		radeon_ring_write(ring, 10); /* poll interval */
2896
		radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
2897
		radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0));
6104 serge 2898
		/* wait for 3D idle clean */
2997 Serge 2899
		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2900
		radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
2901
		radeon_ring_write(ring, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
6104 serge 2902
		/* Emit fence sequence & fire IRQ */
2997 Serge 2903
		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2904
		radeon_ring_write(ring, ((rdev->fence_drv[fence->ring].scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
2905
		radeon_ring_write(ring, fence->seq);
6104 serge 2906
		/* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
2997 Serge 2907
		radeon_ring_write(ring, PACKET0(CP_INT_STATUS, 0));
2908
		radeon_ring_write(ring, RB_INT_STAT);
1963 serge 2909
	}
1413 serge 2910
}
1963 serge 2911
 
5078 serge 2912
/**
2913
 * r600_semaphore_ring_emit - emit a semaphore on the CP ring
2914
 *
2915
 * @rdev: radeon_device pointer
2916
 * @ring: radeon ring buffer object
2917
 * @semaphore: radeon semaphore object
2918
 * @emit_wait: Is this a semaphore wait?
2919
 *
2920
 * Emits a semaphore signal/wait packet to the CP ring and prevents the PFP
2921
 * from running ahead of semaphore waits.
2922
 */
2923
bool r600_semaphore_ring_emit(struct radeon_device *rdev,
2997 Serge 2924
			      struct radeon_ring *ring,
2925
			      struct radeon_semaphore *semaphore,
2926
			      bool emit_wait)
2927
{
2928
	uint64_t addr = semaphore->gpu_addr;
2929
	unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
2930
 
2931
	if (rdev->family < CHIP_CAYMAN)
2932
		sel |= PACKET3_SEM_WAIT_ON_SIGNAL;
2933
 
2934
	radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
5078 serge 2935
	radeon_ring_write(ring, lower_32_bits(addr));
2997 Serge 2936
	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
2937
 
5128 serge 2938
	/* PFP_SYNC_ME packet only exists on 7xx+, only enable it on eg+ */
2939
	if (emit_wait && (rdev->family >= CHIP_CEDAR)) {
5078 serge 2940
		/* Prevent the PFP from running ahead of the semaphore wait */
2941
		radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
2942
		radeon_ring_write(ring, 0x0);
2943
	}
3192 Serge 2944
 
5078 serge 2945
	return true;
3192 Serge 2946
}
2947
 
2948
/**
5078 serge 2949
 * r600_copy_cpdma - copy pages using the CP DMA engine
3192 Serge 2950
 *
2951
 * @rdev: radeon_device pointer
2952
 * @src_offset: src GPU address
2953
 * @dst_offset: dst GPU address
2954
 * @num_gpu_pages: number of GPU pages to xfer
2955
 * @resv: reservation object to sync to
2956
 *
5078 serge 2957
 * Copy GPU pages using the CP DMA engine (r6xx+).
3192 Serge 2958
 * Used by the radeon ttm implementation to move pages if
2959
 * registered as the asic copy callback.
2960
 */
5271 serge 2961
struct radeon_fence *r600_copy_cpdma(struct radeon_device *rdev,
6104 serge 2962
				     uint64_t src_offset, uint64_t dst_offset,
2963
				     unsigned num_gpu_pages,
5271 serge 2964
				     struct reservation_object *resv)
3192 Serge 2965
{
5271 serge 2966
	struct radeon_fence *fence;
2967
	struct radeon_sync sync;
5078 serge 2968
	int ring_index = rdev->asic->copy.blit_ring_index;
3192 Serge 2969
	struct radeon_ring *ring = &rdev->ring[ring_index];
5078 serge 2970
	u32 size_in_bytes, cur_size_in_bytes, tmp;
3192 Serge 2971
	int i, num_loops;
2972
	int r = 0;
2973
 
5271 serge 2974
	radeon_sync_create(&sync);
3192 Serge 2975
 
5078 serge 2976
	size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
2977
	num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
2978
	r = radeon_ring_lock(rdev, ring, num_loops * 6 + 24);
3192 Serge 2979
	if (r) {
2980
		DRM_ERROR("radeon: moving bo (%d).\n", r);
5271 serge 2981
		radeon_sync_free(rdev, &sync, NULL);
2982
		return ERR_PTR(r);
3192 Serge 2983
	}
2984
 
5271 serge 2985
	radeon_sync_resv(rdev, &sync, resv, false);
2986
	radeon_sync_rings(rdev, &sync, ring->idx);
3192 Serge 2987
 
5078 serge 2988
	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2989
	radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
2990
	radeon_ring_write(ring, WAIT_3D_IDLE_bit);
3192 Serge 2991
	for (i = 0; i < num_loops; i++) {
5078 serge 2992
		cur_size_in_bytes = size_in_bytes;
2993
		if (cur_size_in_bytes > 0x1fffff)
2994
			cur_size_in_bytes = 0x1fffff;
2995
		size_in_bytes -= cur_size_in_bytes;
2996
		tmp = upper_32_bits(src_offset) & 0xff;
2997
		if (size_in_bytes == 0)
2998
			tmp |= PACKET3_CP_DMA_CP_SYNC;
2999
		radeon_ring_write(ring, PACKET3(PACKET3_CP_DMA, 4));
3000
		radeon_ring_write(ring, lower_32_bits(src_offset));
3001
		radeon_ring_write(ring, tmp);
3002
		radeon_ring_write(ring, lower_32_bits(dst_offset));
3003
		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
3004
		radeon_ring_write(ring, cur_size_in_bytes);
3005
		src_offset += cur_size_in_bytes;
3006
		dst_offset += cur_size_in_bytes;
3192 Serge 3007
	}
5078 serge 3008
	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
3009
	radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
3010
	radeon_ring_write(ring, WAIT_CP_DMA_IDLE_bit);
3192 Serge 3011
 
5271 serge 3012
	r = radeon_fence_emit(rdev, &fence, ring->idx);
3192 Serge 3013
	if (r) {
3014
		radeon_ring_unlock_undo(rdev, ring);
5271 serge 3015
		radeon_sync_free(rdev, &sync, NULL);
3016
		return ERR_PTR(r);
3192 Serge 3017
	}
3018
 
5078 serge 3019
	radeon_ring_unlock_commit(rdev, ring, false);
5271 serge 3020
	radeon_sync_free(rdev, &sync, fence);
3192 Serge 3021
 
5271 serge 3022
	return fence;
3192 Serge 3023
}
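
/*
 * Ring-space arithmetic for the copy above (an illustrative sketch,
 * not driver code; the helper name is hypothetical): each CP_DMA
 * packet moves at most 0x1fffff bytes (~2 MiB) and costs 6 dwords,
 * with 24 more dwords reserved for the WAIT_UNTIL brackets and the
 * fence.
 */
static unsigned r600_cpdma_ring_dwords_sketch(unsigned num_gpu_pages)
{
	unsigned size_in_bytes = num_gpu_pages << RADEON_GPU_PAGE_SHIFT;
	unsigned num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);

	/* e.g. 1024 pages = 4 MiB -> 3 loops -> 3 * 6 + 24 = 42 dwords */
	return num_loops * 6 + 24;
}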

int r600_set_surface_reg(struct radeon_device *rdev, int reg,
			 uint32_t tiling_flags, uint32_t pitch,
			 uint32_t offset, uint32_t obj_size)
{
	/* FIXME: implement */
	return 0;
}

void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
{
	/* FIXME: implement */
}

static int r600_startup(struct radeon_device *rdev)
{
	struct radeon_ring *ring;
	int r;

	/* enable pcie gen2 link */
	r600_pcie_gen2_enable(rdev);

	/* scratch needs to be initialized before MC */
	r = r600_vram_scratch_init(rdev);
	if (r)
		return r;

	r600_mc_program(rdev);

	if (rdev->flags & RADEON_IS_AGP) {
		r600_agp_enable(rdev);
	} else {
		r = r600_pcie_gart_enable(rdev);
		if (r)
			return r;
	}
	r600_gpu_init(rdev);

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	if (rdev->has_uvd) {
		r = uvd_v1_0_resume(rdev);
		if (!r) {
			r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
			if (r) {
				dev_err(rdev->dev, "failed initializing UVD fences (%d).\n", r);
			}
		}
		if (r)
			rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
	}

	/* Enable IRQ */
	if (!rdev->irq.installed) {
		r = radeon_irq_kms_init(rdev);
		if (r)
			return r;
	}

	r = r600_irq_init(rdev);
	if (r) {
		DRM_ERROR("radeon: IH init failed (%d).\n", r);
		radeon_irq_kms_fini(rdev);
		return r;
	}
	r600_irq_set(rdev);

	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
			     RADEON_CP_PACKET2);
	if (r)
		return r;

	r = r600_cp_load_microcode(rdev);
	if (r)
		return r;
	r = r600_cp_resume(rdev);
	if (r)
		return r;

	if (rdev->has_uvd) {
		ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
		if (ring->ring_size) {
			r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
					     RADEON_CP_PACKET2);
			if (!r)
				r = uvd_v1_0_init(rdev);
			if (r)
				DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
		}
	}

	r = radeon_ib_pool_init(rdev);
	if (r) {
		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
		return r;
	}

	r = radeon_audio_init(rdev);
	if (r) {
		DRM_ERROR("radeon: audio init failed\n");
		return r;
	}

	return 0;
}

void r600_vga_set_state(struct radeon_device *rdev, bool state)
{
	uint32_t temp;

	temp = RREG32(CONFIG_CNTL);
	if (state == false) {
		temp &= ~(1<<0);
		temp |= (1<<1);
	} else {
		temp &= ~(1<<1);
	}
	WREG32(CONFIG_CNTL, temp);
}

/* The plan is to move initialization into this function and use
 * helper functions so that radeon_device_init does little more
 * than call asic-specific functions. This should also allow
 * removing a bunch of callbacks like vram_info.
 */
int r600_init(struct radeon_device *rdev)
{
	int r;

	if (r600_debugfs_mc_info_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for mc!\n");
	}
	/* Read BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	/* Must be an ATOMBIOS */
	if (!rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
		return -EINVAL;
	}
	r = radeon_atombios_init(rdev);
	if (r)
		return r;
	/* Post card if necessary */
	if (!radeon_card_posted(rdev)) {
		if (!rdev->bios) {
			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
			return -EINVAL;
		}
		DRM_INFO("GPU not posted. posting now...\n");
		atom_asic_init(rdev->mode_info.atom_context);
	}
	/* Initialize scratch registers */
	r600_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r)
			radeon_agp_disable(rdev);
	}
	r = r600_mc_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;

	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
		r = r600_init_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load firmware!\n");
			return r;
		}
	}

	/* Initialize power management */
	radeon_pm_init(rdev);

	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
	r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);

	if (rdev->has_uvd) {
		r = radeon_uvd_init(rdev);
		if (!r) {
			rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
			r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX], 4096);
		}
	}

	rdev->ih.ring_obj = NULL;
	r600_ih_ring_init(rdev, 64 * 1024);

	r = r600_pcie_gart_init(rdev);
	if (r)
		return r;

	rdev->accel_working = true;
	r = r600_startup(rdev);
	if (r) {
		dev_err(rdev->dev, "disabling GPU acceleration\n");
		r600_cp_fini(rdev);
		r600_irq_fini(rdev);
		radeon_wb_fini(rdev);
		radeon_ib_pool_fini(rdev);
		radeon_irq_kms_fini(rdev);
		r600_pcie_gart_fini(rdev);
		rdev->accel_working = false;
	}

	return 0;
}

void r600_fini(struct radeon_device *rdev)
{
	radeon_pm_fini(rdev);
	radeon_audio_fini(rdev);
	r600_cp_fini(rdev);
	r600_irq_fini(rdev);
	if (rdev->has_uvd) {
		uvd_v1_0_fini(rdev);
		radeon_uvd_fini(rdev);
	}
	radeon_wb_fini(rdev);
	radeon_ib_pool_fini(rdev);
	radeon_irq_kms_fini(rdev);
	r600_pcie_gart_fini(rdev);
	r600_vram_scratch_fini(rdev);
	radeon_agp_fini(rdev);
	radeon_gem_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}

/*
 * CS stuff
 */
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];
	u32 next_rptr;

	if (ring->rptr_save_reg) {
		next_rptr = ring->wptr + 3 + 4;
		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		radeon_ring_write(ring, ((ring->rptr_save_reg -
					 PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
		radeon_ring_write(ring, next_rptr);
	} else if (rdev->wb.enabled) {
		next_rptr = ring->wptr + 5 + 4;
		radeon_ring_write(ring, PACKET3(PACKET3_MEM_WRITE, 3));
		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
		radeon_ring_write(ring, (upper_32_bits(ring->next_rptr_gpu_addr) & 0xff) | (1 << 18));
		radeon_ring_write(ring, next_rptr);
		radeon_ring_write(ring, 0);
	}

	radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
			  (2 << 0) |
#endif
			  (ib->gpu_addr & 0xFFFFFFFC));
	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
	radeon_ring_write(ring, ib->length_dw);
}

int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
	struct radeon_ib ib;
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = radeon_scratch_get(rdev, &scratch);
	if (r) {
		DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
	if (r) {
		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
		goto free_scratch;
	}
	ib.ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
	ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
	ib.ptr[2] = 0xDEADBEEF;
	ib.length_dw = 3;
	r = radeon_ib_schedule(rdev, &ib, NULL, false);
	if (r) {
		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
		goto free_ib;
	}
	r = radeon_fence_wait(ib.fence, false);
	if (r) {
		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
		goto free_ib;
	}
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
	} else {
		DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}
free_ib:
	radeon_ib_free(rdev, &ib);
free_scratch:
	radeon_scratch_free(rdev, scratch);
	return r;
}

/*
 * Interrupts
 *
 * Interrupts use a ring buffer on r6xx/r7xx hardware.  It works much
 * the same as the CP ring buffer, but in reverse.  Rather than the CPU
 * writing to the ring and the GPU consuming, the GPU writes to the ring
 * and the host consumes.  As the host irq handler processes interrupts,
 * it increments the rptr.  When the rptr catches up with the wptr, all
 * the current interrupts have been processed.
 */
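
/*
 * A minimal sketch of the consumer model described above (illustrative
 * only, not driver code): the GPU advances wptr as it writes vectors,
 * and the host walks rptr toward wptr one 128-bit (16-byte) vector at
 * a time, then publishes how far it got. The function name and the
 * handler callback are hypothetical; the real loop lives in
 * r600_irq_process() below.
 */
static void r600_ih_consume_sketch(struct radeon_device *rdev, u32 wptr,
				   void (*handle)(u32 src_id, u32 src_data))
{
	u32 rptr = rdev->ih.rptr;

	while (rptr != wptr) {
		/* rptr/wptr are byte offsets, the ring is mapped as dwords */
		u32 ring_index = rptr / 4;
		u32 src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
		u32 src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;

		handle(src_id, src_data);
		rptr = (rptr + 16) & rdev->ih.ptr_mask;
	}
	rdev->ih.rptr = rptr;
	WREG32(IH_RB_RPTR, rptr);	/* let the GPU reclaim the space */
}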

void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
{
	u32 rb_bufsz;

	/* Align ring size */
	rb_bufsz = order_base_2(ring_size / 4);
	ring_size = (1 << rb_bufsz) * 4;
	rdev->ih.ring_size = ring_size;
	rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
	rdev->ih.rptr = 0;
}
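
/*
 * Worked example for the alignment above (illustrative only; the
 * helper name is hypothetical): order_base_2() rounds the dword count
 * up to a power of two, so a non-power-of-two request grows to the
 * next size and the resulting mask stays a simple (size - 1).
 */
static unsigned r600_ih_ring_size_sketch(unsigned requested_bytes)
{
	u32 rb_bufsz = order_base_2(requested_bytes / 4);

	/* 48 KiB -> 12288 dwords -> order 14 -> 16384 dwords = 64 KiB */
	return (1 << rb_bufsz) * 4;
}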

int r600_ih_ring_alloc(struct radeon_device *rdev)
{
	int r;

	/* Allocate ring buffer */
	if (rdev->ih.ring_obj == NULL) {
		r = radeon_bo_create(rdev, rdev->ih.ring_size,
				     PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT, 0,
				     NULL, NULL, &rdev->ih.ring_obj);
		if (r) {
			DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
			return r;
		}
		r = radeon_bo_reserve(rdev->ih.ring_obj, false);
		if (unlikely(r != 0))
			return r;
		r = radeon_bo_pin(rdev->ih.ring_obj,
				  RADEON_GEM_DOMAIN_GTT,
				  &rdev->ih.gpu_addr);
		if (r) {
			radeon_bo_unreserve(rdev->ih.ring_obj);
			DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r);
			return r;
		}
		r = radeon_bo_kmap(rdev->ih.ring_obj,
				   (void **)&rdev->ih.ring);
		radeon_bo_unreserve(rdev->ih.ring_obj);
		if (r) {
			DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r);
			return r;
		}
	}
	return 0;
}

void r600_ih_ring_fini(struct radeon_device *rdev)
{
	int r;
	if (rdev->ih.ring_obj) {
		r = radeon_bo_reserve(rdev->ih.ring_obj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(rdev->ih.ring_obj);
			radeon_bo_unpin(rdev->ih.ring_obj);
			radeon_bo_unreserve(rdev->ih.ring_obj);
		}
		radeon_bo_unref(&rdev->ih.ring_obj);
		rdev->ih.ring = NULL;
		rdev->ih.ring_obj = NULL;
	}
}

void r600_rlc_stop(struct radeon_device *rdev)
{
	if ((rdev->family >= CHIP_RV770) &&
	    (rdev->family <= CHIP_RV740)) {
		/* r7xx asics need to soft reset RLC before halting */
		WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
		RREG32(SRBM_SOFT_RESET);
		mdelay(15);
		WREG32(SRBM_SOFT_RESET, 0);
		RREG32(SRBM_SOFT_RESET);
	}

	WREG32(RLC_CNTL, 0);
}

static void r600_rlc_start(struct radeon_device *rdev)
{
	WREG32(RLC_CNTL, RLC_ENABLE);
}

static int r600_rlc_resume(struct radeon_device *rdev)
{
	u32 i;
	const __be32 *fw_data;

	if (!rdev->rlc_fw)
		return -EINVAL;

	r600_rlc_stop(rdev);

	WREG32(RLC_HB_CNTL, 0);

	WREG32(RLC_HB_BASE, 0);
	WREG32(RLC_HB_RPTR, 0);
	WREG32(RLC_HB_WPTR, 0);
	WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
	WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
	WREG32(RLC_MC_CNTL, 0);
	WREG32(RLC_UCODE_CNTL, 0);

	fw_data = (const __be32 *)rdev->rlc_fw->data;
	if (rdev->family >= CHIP_RV770) {
		for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
			WREG32(RLC_UCODE_ADDR, i);
			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
		}
	} else {
		for (i = 0; i < R600_RLC_UCODE_SIZE; i++) {
			WREG32(RLC_UCODE_ADDR, i);
			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
		}
	}
	WREG32(RLC_UCODE_ADDR, 0);

	r600_rlc_start(rdev);

	return 0;
}

static void r600_enable_interrupts(struct radeon_device *rdev)
{
	u32 ih_cntl = RREG32(IH_CNTL);
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);

	ih_cntl |= ENABLE_INTR;
	ih_rb_cntl |= IH_RB_ENABLE;
	WREG32(IH_CNTL, ih_cntl);
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	rdev->ih.enabled = true;
}

void r600_disable_interrupts(struct radeon_device *rdev)
{
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
	u32 ih_cntl = RREG32(IH_CNTL);

	ih_rb_cntl &= ~IH_RB_ENABLE;
	ih_cntl &= ~ENABLE_INTR;
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	WREG32(IH_CNTL, ih_cntl);
	/* set rptr, wptr to 0 */
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);
	rdev->ih.enabled = false;
	rdev->ih.rptr = 0;
}

static void r600_disable_interrupt_state(struct radeon_device *rdev)
{
	u32 tmp;

	WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
	tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
	WREG32(DMA_CNTL, tmp);
	WREG32(GRBM_INT_CNTL, 0);
	WREG32(DxMODE_INT_MASK, 0);
	WREG32(D1GRPH_INTERRUPT_CONTROL, 0);
	WREG32(D2GRPH_INTERRUPT_CONTROL, 0);
	if (ASIC_IS_DCE3(rdev)) {
		WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0);
		WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0);
		tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD1_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD2_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD3_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
		if (ASIC_IS_DCE32(rdev)) {
			tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
			tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
			tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, tmp);
			tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, tmp);
		} else {
			tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
			WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
			tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
			WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp);
		}
	} else {
		WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
		WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
		tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
		tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
		tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
		tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
		WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
		tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
		WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp);
	}
}

int r600_irq_init(struct radeon_device *rdev)
{
	int ret = 0;
	int rb_bufsz;
	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;

	/* allocate ring */
	ret = r600_ih_ring_alloc(rdev);
	if (ret)
		return ret;

	/* disable irqs */
	r600_disable_interrupts(rdev);

	/* init rlc */
	if (rdev->family >= CHIP_CEDAR)
		ret = evergreen_rlc_resume(rdev);
	else
		ret = r600_rlc_resume(rdev);
	if (ret) {
		r600_ih_ring_fini(rdev);
		return ret;
	}

	/* setup interrupt control */
	/* set dummy read address to ring address */
	WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
	interrupt_cntl = RREG32(INTERRUPT_CNTL);
	/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
	 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
	 */
	interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
	/* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
	interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
	WREG32(INTERRUPT_CNTL, interrupt_cntl);

	WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
	rb_bufsz = order_base_2(rdev->ih.ring_size / 4);

	ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
		      IH_WPTR_OVERFLOW_CLEAR |
		      (rb_bufsz << 1));

	if (rdev->wb.enabled)
		ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;

	/* set the writeback address whether it's enabled or not */
	WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);

	WREG32(IH_RB_CNTL, ih_rb_cntl);

	/* set rptr, wptr to 0 */
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);

	/* Default settings for IH_CNTL (disabled at first) */
	ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10);
	/* RPTR_REARM only works if MSIs are enabled */
	if (rdev->msi_enabled)
		ih_cntl |= RPTR_REARM;
	WREG32(IH_CNTL, ih_cntl);

	/* force the active interrupt state to all disabled */
	if (rdev->family >= CHIP_CEDAR)
		evergreen_disable_interrupt_state(rdev);
	else
		r600_disable_interrupt_state(rdev);

	/* at this point everything should be set up correctly to enable master */
	pci_set_master(rdev->pdev);

	/* enable irqs */
	r600_enable_interrupts(rdev);

	return ret;
}

void r600_irq_suspend(struct radeon_device *rdev)
{
	r600_irq_disable(rdev);
	r600_rlc_stop(rdev);
}

void r600_irq_fini(struct radeon_device *rdev)
{
	r600_irq_suspend(rdev);
	r600_ih_ring_fini(rdev);
}

int r600_irq_set(struct radeon_device *rdev)
{
	u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
	u32 mode_int = 0;
	u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
	u32 grbm_int_cntl = 0;
	u32 hdmi0, hdmi1;
	u32 dma_cntl;
	u32 thermal_int = 0;

	if (!rdev->irq.installed) {
		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
		return -EINVAL;
	}
	/* don't enable anything if the ih is disabled */
	if (!rdev->ih.enabled) {
		r600_disable_interrupts(rdev);
		/* force the active interrupt state to all disabled */
		r600_disable_interrupt_state(rdev);
		return 0;
	}

	if (ASIC_IS_DCE3(rdev)) {
		hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
		if (ASIC_IS_DCE32(rdev)) {
			hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
			hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
			hdmi0 = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
			hdmi1 = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
		} else {
			hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
			hdmi1 = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
		}
	} else {
		hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
		hdmi1 = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
	}

	dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE;

	if ((rdev->family > CHIP_R600) && (rdev->family < CHIP_RV770)) {
		thermal_int = RREG32(CG_THERMAL_INT) &
			~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
	} else if (rdev->family >= CHIP_RV770) {
		thermal_int = RREG32(RV770_CG_THERMAL_INT) &
			~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
	}
	if (rdev->irq.dpm_thermal) {
		DRM_DEBUG("dpm thermal\n");
		thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
	}

	if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
		DRM_DEBUG("r600_irq_set: sw int\n");
		cp_int_cntl |= RB_INT_ENABLE;
		cp_int_cntl |= TIME_STAMP_INT_ENABLE;
	}

	if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
		DRM_DEBUG("r600_irq_set: sw int dma\n");
		dma_cntl |= TRAP_ENABLE;
	}

	if (rdev->irq.crtc_vblank_int[0] ||
	    atomic_read(&rdev->irq.pflip[0])) {
		DRM_DEBUG("r600_irq_set: vblank 0\n");
		mode_int |= D1MODE_VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[1] ||
	    atomic_read(&rdev->irq.pflip[1])) {
		DRM_DEBUG("r600_irq_set: vblank 1\n");
		mode_int |= D2MODE_VBLANK_INT_MASK;
	}
	if (rdev->irq.hpd[0]) {
		DRM_DEBUG("r600_irq_set: hpd 1\n");
		hpd1 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[1]) {
		DRM_DEBUG("r600_irq_set: hpd 2\n");
		hpd2 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[2]) {
		DRM_DEBUG("r600_irq_set: hpd 3\n");
		hpd3 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[3]) {
		DRM_DEBUG("r600_irq_set: hpd 4\n");
		hpd4 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[4]) {
		DRM_DEBUG("r600_irq_set: hpd 5\n");
		hpd5 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[5]) {
		DRM_DEBUG("r600_irq_set: hpd 6\n");
		hpd6 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.afmt[0]) {
		DRM_DEBUG("r600_irq_set: hdmi 0\n");
		hdmi0 |= HDMI0_AZ_FORMAT_WTRIG_MASK;
	}
	if (rdev->irq.afmt[1]) {
		DRM_DEBUG("r600_irq_set: hdmi 1\n");
		hdmi1 |= HDMI0_AZ_FORMAT_WTRIG_MASK;
	}

	WREG32(CP_INT_CNTL, cp_int_cntl);
	WREG32(DMA_CNTL, dma_cntl);
	WREG32(DxMODE_INT_MASK, mode_int);
	WREG32(D1GRPH_INTERRUPT_CONTROL, DxGRPH_PFLIP_INT_MASK);
	WREG32(D2GRPH_INTERRUPT_CONTROL, DxGRPH_PFLIP_INT_MASK);
	WREG32(GRBM_INT_CNTL, grbm_int_cntl);
	if (ASIC_IS_DCE3(rdev)) {
		WREG32(DC_HPD1_INT_CONTROL, hpd1);
		WREG32(DC_HPD2_INT_CONTROL, hpd2);
		WREG32(DC_HPD3_INT_CONTROL, hpd3);
		WREG32(DC_HPD4_INT_CONTROL, hpd4);
		if (ASIC_IS_DCE32(rdev)) {
			WREG32(DC_HPD5_INT_CONTROL, hpd5);
			WREG32(DC_HPD6_INT_CONTROL, hpd6);
			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, hdmi0);
			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, hdmi1);
		} else {
			WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
			WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, hdmi1);
		}
	} else {
		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
		WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
		WREG32(HDMI1_AUDIO_PACKET_CONTROL, hdmi1);
	}
	if ((rdev->family > CHIP_R600) && (rdev->family < CHIP_RV770)) {
		WREG32(CG_THERMAL_INT, thermal_int);
	} else if (rdev->family >= CHIP_RV770) {
		WREG32(RV770_CG_THERMAL_INT, thermal_int);
	}

	/* posting read */
	RREG32(R_000E50_SRBM_STATUS);

	return 0;
}

static void r600_irq_ack(struct radeon_device *rdev)
{
	u32 tmp;

	if (ASIC_IS_DCE3(rdev)) {
		rdev->irq.stat_regs.r600.disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
		rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
		rdev->irq.stat_regs.r600.disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
		if (ASIC_IS_DCE32(rdev)) {
			rdev->irq.stat_regs.r600.hdmi0_status = RREG32(AFMT_STATUS + DCE3_HDMI_OFFSET0);
			rdev->irq.stat_regs.r600.hdmi1_status = RREG32(AFMT_STATUS + DCE3_HDMI_OFFSET1);
		} else {
			rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS);
			rdev->irq.stat_regs.r600.hdmi1_status = RREG32(DCE3_HDMI1_STATUS);
		}
	} else {
		rdev->irq.stat_regs.r600.disp_int = RREG32(DISP_INTERRUPT_STATUS);
		rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
		rdev->irq.stat_regs.r600.disp_int_cont2 = 0;
		rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS);
		rdev->irq.stat_regs.r600.hdmi1_status = RREG32(HDMI1_STATUS);
	}
	rdev->irq.stat_regs.r600.d1grph_int = RREG32(D1GRPH_INTERRUPT_STATUS);
	rdev->irq.stat_regs.r600.d2grph_int = RREG32(D2GRPH_INTERRUPT_STATUS);

	if (rdev->irq.stat_regs.r600.d1grph_int & DxGRPH_PFLIP_INT_OCCURRED)
		WREG32(D1GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
	if (rdev->irq.stat_regs.r600.d2grph_int & DxGRPH_PFLIP_INT_OCCURRED)
		WREG32(D2GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
	if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT)
		WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
	if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT)
		WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
	if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT)
		WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
	if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT)
		WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
	if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD1_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD1_INT_CONTROL, tmp);
		} else {
			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
		}
	}
	if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD2_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD2_INT_CONTROL, tmp);
		} else {
			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
		}
	}
	if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD3_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD3_INT_CONTROL, tmp);
		} else {
			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
		}
	}
	if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
		tmp = RREG32(DC_HPD4_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
	}
	if (ASIC_IS_DCE32(rdev)) {
		if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
			tmp = RREG32(DC_HPD5_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
		}
		if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
			tmp = RREG32(DC_HPD6_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
		}
		if (rdev->irq.stat_regs.r600.hdmi0_status & AFMT_AZ_FORMAT_WTRIG) {
			tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0);
			tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, tmp);
		}
		if (rdev->irq.stat_regs.r600.hdmi1_status & AFMT_AZ_FORMAT_WTRIG) {
			tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1);
			tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, tmp);
		}
	} else {
		if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
			tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL);
			tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
			WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
		}
		if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
			if (ASIC_IS_DCE3(rdev)) {
				tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL);
				tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
				WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp);
			} else {
				tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL);
				tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
				WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp);
			}
		}
	}
}

void r600_irq_disable(struct radeon_device *rdev)
{
	r600_disable_interrupts(rdev);
	/* Wait and acknowledge irq */
	mdelay(1);
	r600_irq_ack(rdev);
	r600_disable_interrupt_state(rdev);
}

static u32 r600_get_ih_wptr(struct radeon_device *rdev)
{
	u32 wptr, tmp;

	if (rdev->wb.enabled)
		wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
	else
		wptr = RREG32(IH_RB_WPTR);

	if (wptr & RB_OVERFLOW) {
		wptr &= ~RB_OVERFLOW;
		/* When a ring buffer overflow happens, start parsing interrupts
		 * from the last not-overwritten vector (wptr + 16). Hopefully
		 * this should allow us to catch up.
		 */
		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
			 wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
		rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
		tmp = RREG32(IH_RB_CNTL);
		tmp |= IH_WPTR_OVERFLOW_CLEAR;
		WREG32(IH_RB_CNTL, tmp);
	}
	return (wptr & rdev->ih.ptr_mask);
}
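
/*
 * Quick numbers for the overflow path above (an illustrative sketch,
 * not driver code; the helper name is hypothetical): resuming at
 * (wptr + 16) skips the one vector that may just have been
 * overwritten, and the power-of-two mask wraps the result back into
 * the ring.
 */
static u32 r600_ih_overflow_resume_sketch(u32 wptr, u32 ptr_mask)
{
	/* e.g. wptr = 0xfff0 with a 64 KiB ring (ptr_mask = 0xffff)
	 * resumes at (0xfff0 + 16) & 0xffff = 0x0000
	 */
	return (wptr + 16) & ptr_mask;
}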

/*        r600 IV Ring
 * Each IV ring entry is 128 bits:
 * [7:0]    - interrupt source id
 * [31:8]   - reserved
 * [59:32]  - interrupt source data
 * [127:60] - reserved
 *
 * The basic interrupt vector entries
 * are decoded as follows:
 * src_id  src_data  description
 *      1         0  D1 Vblank
 *      1         1  D1 Vline
 *      5         0  D2 Vblank
 *      5         1  D2 Vline
 *     19         0  FP Hot plug detection A
 *     19         1  FP Hot plug detection B
 *     19         2  DAC A auto-detection
 *     19         3  DAC B auto-detection
 *     21         4  HDMI block A
 *     21         5  HDMI block B
 *    176         -  CP_INT RB
 *    177         -  CP_INT IB1
 *    178         -  CP_INT IB2
 *    181         -  EOP Interrupt
 *    233         -  GUI Idle
 *
 * Note, these are based on r600 and may need to be
 * adjusted or added to on newer asics
 */

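/*
 * Sketch of decoding one IV ring entry laid out as above (illustrative
 * only; the struct and function names are hypothetical). The entry
 * occupies four little-endian dwords, and on r600 only dword 0 (source
 * id) and dword 1 (source data) carry information.
 */
struct r600_iv_entry_sketch {
	u32 src_id;	/* bits [7:0] of the entry */
	u32 src_data;	/* bits [59:32] of the entry */
};

static struct r600_iv_entry_sketch
r600_decode_iv_sketch(const volatile u32 *ring, u32 rptr)
{
	struct r600_iv_entry_sketch e;
	u32 ring_index = rptr / 4;	/* byte offset -> dword index */

	e.src_id = le32_to_cpu(ring[ring_index]) & 0xff;
	e.src_data = le32_to_cpu(ring[ring_index + 1]) & 0xfffffff;
	return e;
}
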
int r600_irq_process(struct radeon_device *rdev)
{
	u32 wptr;
	u32 rptr;
	u32 src_id, src_data;
	u32 ring_index;
	bool queue_hotplug = false;
	bool queue_hdmi = false;
	bool queue_thermal = false;

	if (!rdev->ih.enabled || rdev->shutdown)
		return IRQ_NONE;

	/* No MSIs, need a dummy read to flush PCI DMAs */
	if (!rdev->msi_enabled)
		RREG32(IH_RB_WPTR);

	wptr = r600_get_ih_wptr(rdev);

restart_ih:
	/* is somebody else already processing irqs? */
	if (atomic_xchg(&rdev->ih.lock, 1))
		return IRQ_NONE;

	rptr = rdev->ih.rptr;
	DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);

	/* Order reading of wptr vs. reading of IH ring data */
	rmb();

	/* display interrupts */
	r600_irq_ack(rdev);

	while (rptr != wptr) {
		/* wptr/rptr are in bytes! */
		ring_index = rptr / 4;
		src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
		src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;

		switch (src_id) {
		case 1: /* D1 vblank/vline */
			switch (src_data) {
			case 0: /* D1 vblank */
				if (!(rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT))
					DRM_DEBUG("IH: D1 vblank - IH event w/o asserted irq bit?\n");

				if (rdev->irq.crtc_vblank_int[0]) {
					drm_handle_vblank(rdev->ddev, 0);
					rdev->pm.vblank_sync = true;
					wake_up(&rdev->irq.vblank_queue);
				}
				if (atomic_read(&rdev->irq.pflip[0]))
					radeon_crtc_handle_vblank(rdev, 0);
				rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
				DRM_DEBUG("IH: D1 vblank\n");

				break;
			case 1: /* D1 vline */
				if (!(rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT))
					DRM_DEBUG("IH: D1 vline - IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
				DRM_DEBUG("IH: D1 vline\n");

				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 5: /* D2 vblank/vline */
			switch (src_data) {
			case 0: /* D2 vblank */
				if (!(rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT))
					DRM_DEBUG("IH: D2 vblank - IH event w/o asserted irq bit?\n");

				if (rdev->irq.crtc_vblank_int[1]) {
					drm_handle_vblank(rdev->ddev, 1);
					rdev->pm.vblank_sync = true;
					wake_up(&rdev->irq.vblank_queue);
				}
				if (atomic_read(&rdev->irq.pflip[1]))
					radeon_crtc_handle_vblank(rdev, 1);
				rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
				DRM_DEBUG("IH: D2 vblank\n");

				break;
			case 1: /* D2 vline */
				if (!(rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT))
					DRM_DEBUG("IH: D2 vline - IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
				DRM_DEBUG("IH: D2 vline\n");

				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 9: /* D1 pflip */
			DRM_DEBUG("IH: D1 flip\n");
			break;
		case 11: /* D2 pflip */
			DRM_DEBUG("IH: D2 flip\n");
			break;
		case 19: /* HPD/DAC hotplug */
			switch (src_data) {
			case 0:
				if (!(rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT))
					DRM_DEBUG("IH: HPD1 - IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
				queue_hotplug = true;
				DRM_DEBUG("IH: HPD1\n");
				break;
			case 1:
				if (!(rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT))
					DRM_DEBUG("IH: HPD2 - IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
				queue_hotplug = true;
				DRM_DEBUG("IH: HPD2\n");
				break;
			case 4:
				if (!(rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT))
					DRM_DEBUG("IH: HPD3 - IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
				queue_hotplug = true;
				DRM_DEBUG("IH: HPD3\n");
				break;
			case 5:
				if (!(rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT))
					DRM_DEBUG("IH: HPD4 - IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
				queue_hotplug = true;
				DRM_DEBUG("IH: HPD4\n");
				break;
			case 10:
				if (!(rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT))
					DRM_DEBUG("IH: HPD5 - IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
				queue_hotplug = true;
				DRM_DEBUG("IH: HPD5\n");
				break;
			case 12:
				if (!(rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT))
					DRM_DEBUG("IH: HPD6 - IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
				queue_hotplug = true;
				DRM_DEBUG("IH: HPD6\n");

				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 21: /* hdmi */
			switch (src_data) {
			case 4:
				if (!(rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG))
					DRM_DEBUG("IH: HDMI0 - IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.r600.hdmi0_status &= ~HDMI0_AZ_FORMAT_WTRIG;
				queue_hdmi = true;
				DRM_DEBUG("IH: HDMI0\n");

				break;
			case 5:
				if (!(rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG))
					DRM_DEBUG("IH: HDMI1 - IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.r600.hdmi1_status &= ~HDMI0_AZ_FORMAT_WTRIG;
				queue_hdmi = true;
				DRM_DEBUG("IH: HDMI1\n");

				break;
			default:
				DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 124: /* UVD */
			DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
			radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
			break;
		case 176: /* CP_INT in ring buffer */
		case 177: /* CP_INT in IB1 */
		case 178: /* CP_INT in IB2 */
			DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
			break;
		case 181: /* CP EOP event */
			DRM_DEBUG("IH: CP EOP\n");
			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
			break;
		case 224: /* DMA trap event */
			DRM_DEBUG("IH: DMA trap\n");
			radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
			break;
		case 230: /* thermal low to high */
			DRM_DEBUG("IH: thermal low to high\n");
			rdev->pm.dpm.thermal.high_to_low = false;
			queue_thermal = true;
			break;
		case 231: /* thermal high to low */
			DRM_DEBUG("IH: thermal high to low\n");
			rdev->pm.dpm.thermal.high_to_low = true;
			queue_thermal = true;
			break;
		case 233: /* GUI IDLE */
			DRM_DEBUG("IH: GUI idle\n");
			break;
		default:
			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
			break;
		}

		/* wptr/rptr are in bytes! */
		rptr += 16;
		rptr &= rdev->ih.ptr_mask;
		WREG32(IH_RB_RPTR, rptr);
	}
	rdev->ih.rptr = rptr;
	atomic_set(&rdev->ih.lock, 0);

	/* make sure wptr hasn't changed while processing */
	wptr = r600_get_ih_wptr(rdev);
	if (wptr != rptr)
		goto restart_ih;

	return IRQ_HANDLED;
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int r600_debugfs_mc_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	DREG32_SYS(m, rdev, R_000E50_SRBM_STATUS);
	DREG32_SYS(m, rdev, VM_L2_STATUS);
	return 0;
}

static struct drm_info_list r600_mc_info_list[] = {
	{"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
};
#endif

int r600_debugfs_mc_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, r600_mc_info_list, ARRAY_SIZE(r600_mc_info_list));
#else
	return 0;
#endif
}

/**
 * r600_mmio_hdp_flush - flush Host Data Path cache via MMIO
 * @rdev: radeon device structure
 *
 * Some R6XX/R7XX don't seem to take into account HDP flushes performed
 * through the ring buffer. This leads to corruption in rendering, see
 * http://bugzilla.kernel.org/show_bug.cgi?id=15186 . To avoid this, we
 * directly perform the HDP flush by writing the register through MMIO.
 */
void r600_mmio_hdp_flush(struct radeon_device *rdev)
{
	/* r7xx hw bug.  write to HDP_DEBUG1 followed by fb read
	 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL.
	 * This seems to cause problems on some AGP cards. Just use the old
	 * method for them.
	 */
	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
	    rdev->vram_scratch.ptr && !(rdev->flags & RADEON_IS_AGP)) {
		void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
		u32 tmp;

		WREG32(HDP_DEBUG1, 0);
		tmp = readl((void __iomem *)ptr);
	} else
		WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
}

void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes)
{
	u32 link_width_cntl, mask;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return;

	radeon_gui_idle(rdev);

	switch (lanes) {
	case 0:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
		break;
	case 1:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
		break;
	case 2:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
		break;
	case 4:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
		break;
	case 8:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
		break;
	case 12:
		/* not actually supported */
		mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
		break;
	case 16:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
		break;
	default:
		DRM_ERROR("invalid pcie lane request: %d\n", lanes);
		return;
	}

	link_width_cntl = RREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
	link_width_cntl &= ~RADEON_PCIE_LC_LINK_WIDTH_MASK;
	link_width_cntl |= mask << RADEON_PCIE_LC_LINK_WIDTH_SHIFT;
	link_width_cntl |= (RADEON_PCIE_LC_RECONFIG_NOW |
			    R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE);

	WREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
}

int r600_get_pcie_lanes(struct radeon_device *rdev)
{
	u32 link_width_cntl;

	if (rdev->flags & RADEON_IS_IGP)
		return 0;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return 0;

	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return 0;

	radeon_gui_idle(rdev);

	link_width_cntl = RREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL);

	switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
	case RADEON_PCIE_LC_LINK_WIDTH_X1:
		return 1;
	case RADEON_PCIE_LC_LINK_WIDTH_X2:
		return 2;
	case RADEON_PCIE_LC_LINK_WIDTH_X4:
		return 4;
	case RADEON_PCIE_LC_LINK_WIDTH_X8:
		return 8;
	case RADEON_PCIE_LC_LINK_WIDTH_X12:
		/* not actually supported */
		return 12;
	case RADEON_PCIE_LC_LINK_WIDTH_X0:
	case RADEON_PCIE_LC_LINK_WIDTH_X16:
	default:
		return 16;
	}
}

static void r600_pcie_gen2_enable(struct radeon_device *rdev)
{
	u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp;
	u16 link_cntl2;

	if (radeon_pcie_gen2 == 0)
		return;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return;

	/* only RV6xx+ chips are supported */
	if (rdev->family <= CHIP_R600)
		return;

	if ((rdev->pdev->bus->max_bus_speed != PCIE_SPEED_5_0GT) &&
	    (rdev->pdev->bus->max_bus_speed != PCIE_SPEED_8_0GT))
		return;

	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
	if (speed_cntl & LC_CURRENT_DATA_RATE) {
		DRM_INFO("PCIE gen 2 link speeds already enabled\n");
		return;
	}

	DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");

	/* 55 nm r6xx asics */
	if ((rdev->family == CHIP_RV670) ||
	    (rdev->family == CHIP_RV620) ||
	    (rdev->family == CHIP_RV635)) {
		/* advertise upconfig capability */
		link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
		link_width_cntl &= ~LC_UPCONFIGURE_DIS;
		WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
		link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
		if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) {
			lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
			link_width_cntl &= ~(LC_LINK_WIDTH_MASK |
					     LC_RECONFIG_ARC_MISSING_ESCAPE);
			link_width_cntl |= lanes | LC_RECONFIG_NOW | LC_RENEGOTIATE_EN;
			WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
		} else {
			link_width_cntl |= LC_UPCONFIGURE_DIS;
			WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
		}
	}

	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
	if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
	    (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {

		/* 55 nm r6xx asics */
		if ((rdev->family == CHIP_RV670) ||
		    (rdev->family == CHIP_RV620) ||
		    (rdev->family == CHIP_RV635)) {
			WREG32(MM_CFGREGS_CNTL, 0x8);
			link_cntl2 = RREG32(0x4088);
			WREG32(MM_CFGREGS_CNTL, 0);
			/* not supported yet */
			if (link_cntl2 & SELECTABLE_DEEMPHASIS)
				return;
		}

		speed_cntl &= ~LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK;
		speed_cntl |= (0x3 << LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT);
		speed_cntl &= ~LC_VOLTAGE_TIMER_SEL_MASK;
		speed_cntl &= ~LC_FORCE_DIS_HW_SPEED_CHANGE;
		speed_cntl |= LC_FORCE_EN_HW_SPEED_CHANGE;
		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);

		tmp = RREG32(0x541c);
		WREG32(0x541c, tmp | 0x8);
		WREG32(MM_CFGREGS_CNTL, MM_WR_TO_CFG_EN);
		link_cntl2 = RREG16(0x4088);
		link_cntl2 &= ~TARGET_LINK_SPEED_MASK;
		link_cntl2 |= 0x2;
		WREG16(0x4088, link_cntl2);
		WREG32(MM_CFGREGS_CNTL, 0);

		if ((rdev->family == CHIP_RV670) ||
		    (rdev->family == CHIP_RV620) ||
		    (rdev->family == CHIP_RV635)) {
			training_cntl = RREG32_PCIE_PORT(PCIE_LC_TRAINING_CNTL);
			training_cntl &= ~LC_POINT_7_PLUS_EN;
			WREG32_PCIE_PORT(PCIE_LC_TRAINING_CNTL, training_cntl);
		} else {
			speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
			speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
			WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
		}

		speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
		speed_cntl |= LC_GEN2_EN_STRAP;
		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);

	} else {
		link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
		/* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
		if (1)
			link_width_cntl |= LC_UPCONFIGURE_DIS;
		else
			link_width_cntl &= ~LC_UPCONFIGURE_DIS;
		WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
	}
}

/**
 * r600_get_gpu_clock_counter - return GPU clock counter snapshot
 *
 * @rdev: radeon_device pointer
 *
 * Fetches a GPU clock counter snapshot (R6xx-cayman).
 * Returns the 64 bit clock counter snapshot.
 */
uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev)
{
	uint64_t clock;

	mutex_lock(&rdev->gpu_clock_mutex);
	WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
	clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
		((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
	mutex_unlock(&rdev->gpu_clock_mutex);
	return clock;
}
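
/*
 * Example use of the counter above (illustrative only; the function
 * name is hypothetical): two snapshots bracketing a workload give an
 * elapsed tick count that cannot be torn by 32-bit wrap, since each
 * snapshot is latched and assembled into 64 bits under
 * gpu_clock_mutex.
 */
static uint64_t r600_gpu_clock_delta_sketch(struct radeon_device *rdev)
{
	uint64_t start = r600_get_gpu_clock_counter(rdev);

	/* ... run or wait on some GPU work here ... */

	return r600_get_gpu_clock_counter(rdev) - start;
}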