Subversion Repositories — KolibriOS

Diff of Rev 1986 against Rev 2004, rendered below as the Rev 2004 text; the lines that changed between the two revisions are called out in comments where they occur.
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
/* NOTE: the three <...> header names below were lost to the diff viewer's
 * angle-bracket handling; firmware.h, seq_file.h and slab.h are restored
 * here as an assumption based on the upstream file. */
#include <linux/firmware.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_mode.h"
#include "r600d.h"
#include "atom.h"
#include "avivod.h"
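
/* The *_UCODE_SIZE constants below give the length, in 32-bit dwords, of
 * each command-processor (PFP = prefetch parser, PM4/ME = micro engine)
 * and RLC microcode image per ASIC family; in the full driver the CP/RLC
 * loader routines write exactly this many dwords to the chip. */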
#define PFP_UCODE_SIZE 576
#define PM4_UCODE_SIZE 1792
#define RLC_UCODE_SIZE 768
#define R700_PFP_UCODE_SIZE 848
#define R700_PM4_UCODE_SIZE 1360
#define R700_RLC_UCODE_SIZE 1024
#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376
#define EVERGREEN_RLC_UCODE_SIZE 768
#define CAYMAN_RLC_UCODE_SIZE 1024
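
/* On Linux, MODULE_FIRMWARE() only records these blob names in module
 * metadata so packaging tools can bundle them; in this KolibriOS port the
 * macro is presumably a stub, but the list still documents which firmware
 * files each ASIC needs (pfp/me for the CP, rlc for the interrupt block). */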
/* Firmware Names */
MODULE_FIRMWARE("radeon/R600_pfp.bin");
MODULE_FIRMWARE("radeon/R600_me.bin");
MODULE_FIRMWARE("radeon/RV610_pfp.bin");
MODULE_FIRMWARE("radeon/RV610_me.bin");
MODULE_FIRMWARE("radeon/RV630_pfp.bin");
MODULE_FIRMWARE("radeon/RV630_me.bin");
MODULE_FIRMWARE("radeon/RV620_pfp.bin");
MODULE_FIRMWARE("radeon/RV620_me.bin");
MODULE_FIRMWARE("radeon/RV635_pfp.bin");
MODULE_FIRMWARE("radeon/RV635_me.bin");
MODULE_FIRMWARE("radeon/RV670_pfp.bin");
MODULE_FIRMWARE("radeon/RV670_me.bin");
MODULE_FIRMWARE("radeon/RS780_pfp.bin");
MODULE_FIRMWARE("radeon/RS780_me.bin");
MODULE_FIRMWARE("radeon/RV770_pfp.bin");
MODULE_FIRMWARE("radeon/RV770_me.bin");
MODULE_FIRMWARE("radeon/RV730_pfp.bin");
MODULE_FIRMWARE("radeon/RV730_me.bin");
MODULE_FIRMWARE("radeon/RV710_pfp.bin");
MODULE_FIRMWARE("radeon/RV710_me.bin");
MODULE_FIRMWARE("radeon/R600_rlc.bin");
MODULE_FIRMWARE("radeon/R700_rlc.bin");
MODULE_FIRMWARE("radeon/CEDAR_pfp.bin");
MODULE_FIRMWARE("radeon/CEDAR_me.bin");
MODULE_FIRMWARE("radeon/CEDAR_rlc.bin");
MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin");
MODULE_FIRMWARE("radeon/REDWOOD_me.bin");
MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin");
MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin");
MODULE_FIRMWARE("radeon/JUNIPER_me.bin");
MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
MODULE_FIRMWARE("radeon/PALM_pfp.bin");
MODULE_FIRMWARE("radeon/PALM_me.bin");
MODULE_FIRMWARE("radeon/SUMO_rlc.bin");
MODULE_FIRMWARE("radeon/SUMO_pfp.bin");
MODULE_FIRMWARE("radeon/SUMO_me.bin");
MODULE_FIRMWARE("radeon/SUMO2_pfp.bin");
MODULE_FIRMWARE("radeon/SUMO2_me.bin");

int r600_debugfs_mc_info_init(struct radeon_device *rdev);

/* r600,rv610,rv630,rv620,rv635,rv670 */
int r600_mc_wait_for_idle(struct radeon_device *rdev);
void r600_gpu_init(struct radeon_device *rdev);
void r600_fini(struct radeon_device *rdev);
void r600_irq_disable(struct radeon_device *rdev);
static void r600_pcie_gen2_enable(struct radeon_device *rdev);
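
/* CG_THERMAL_STATUS holds the sensor reading as a 9-bit two's-complement
 * value in degrees C: bit 8 is the sign, so subtracting 256 when it is
 * set sign-extends the low byte before scaling to millidegrees. */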
/* get temperature in millidegrees */
int rv6xx_get_temp(struct radeon_device *rdev)
{
	u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >>
		ASIC_T_SHIFT;
	int actual_temp = temp & 0xff;

	if (temp & 0x100)
		actual_temp -= 256;

	return actual_temp * 1000;
}
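
/* Report whether the GRBM still has graphics work in flight; callers
 * typically poll this to let the pipeline drain before reclocking or
 * resetting the chip. */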
bool r600_gui_idle(struct radeon_device *rdev)
{
	if (RREG32(GRBM_STATUS) & GUI_ACTIVE)
		return false;
	else
		return true;
}
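
/* DCE3 parts expose hot-plug state through the DC_HPDn_INT_STATUS
 * registers, with HPD 5/6 present only on DCE 3.2; earlier chips use the
 * DC_HOT_PLUG_DETECTn_INT_STATUS bank and have just three lines. */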
/* hpd for digital panel detect/disconnect */
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
	bool connected = false;

	if (ASIC_IS_DCE3(rdev)) {
		switch (hpd) {
		case RADEON_HPD_1:
			if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_2:
			if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_3:
			if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_4:
			if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
			/* DCE 3.2 */
		case RADEON_HPD_5:
			if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_6:
			if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		default:
			break;
		}
	} else {
		switch (hpd) {
		case RADEON_HPD_1:
			if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_2:
			if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_3:
			if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		default:
			break;
		}
	}
	return connected;
}
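
/* Program each pad's interrupt polarity to the opposite of its current
 * sense, so the next hot-plug interrupt fires on a connect/disconnect
 * transition rather than on the steady state. */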
void r600_hpd_set_polarity(struct radeon_device *rdev,
			   enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = r600_hpd_sense(rdev, hpd);

	if (ASIC_IS_DCE3(rdev)) {
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HPD1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HPD2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HPD3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD3_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_4:
			tmp = RREG32(DC_HPD4_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD4_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_5:
			tmp = RREG32(DC_HPD5_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
			break;
			/* DCE 3.2 */
		case RADEON_HPD_6:
			tmp = RREG32(DC_HPD6_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	} else {
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	}
}
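
/* Rev 1986 had the rdev->irq.hpd[] bookkeeping and the final
 * r600_irq_set() call below commented out; Rev 2004 re-enables them
 * (and the matching lines in r600_hpd_fini()), wiring hot-plug
 * interrupts back up in this port. */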
void r600_hpd_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	if (ASIC_IS_DCE3(rdev)) {
		u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
		if (ASIC_IS_DCE32(rdev))
			tmp |= DC_HPDx_EN;

		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, tmp);
				rdev->irq.hpd[0] = true;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, tmp);
				rdev->irq.hpd[1] = true;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, tmp);
				rdev->irq.hpd[2] = true;
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, tmp);
				rdev->irq.hpd[3] = true;
				break;
				/* DCE 3.2 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, tmp);
				rdev->irq.hpd[4] = true;
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, tmp);
				rdev->irq.hpd[5] = true;
				break;
			default:
				break;
			}
		}
	} else {
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				rdev->irq.hpd[0] = true;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				rdev->irq.hpd[1] = true;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				rdev->irq.hpd[2] = true;
				break;
			default:
				break;
			}
		}
	}
	if (rdev->irq.installed)
		r600_irq_set(rdev);
}

void r600_hpd_fini(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	if (ASIC_IS_DCE3(rdev)) {
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, 0);
				rdev->irq.hpd[0] = false;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, 0);
				rdev->irq.hpd[1] = false;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, 0);
				rdev->irq.hpd[2] = false;
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, 0);
				rdev->irq.hpd[3] = false;
				break;
				/* DCE 3.2 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, 0);
				rdev->irq.hpd[4] = false;
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, 0);
				rdev->irq.hpd[5] = false;
				break;
			default:
				break;
			}
		}
	} else {
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
				rdev->irq.hpd[0] = false;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
				rdev->irq.hpd[1] = false;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
				rdev->irq.hpd[2] = false;
				break;
			default:
				break;
			}
		}
	}
}

/*
 * R600 PCIE GART
 */
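/* Flushing is two steps: first push any CPU writes out of the HDP cache
 * so the in-VRAM page table is current, then ask VM context 0 to
 * invalidate its TLB and poll VM_CONTEXT0_REQUEST_RESPONSE until the
 * response type reads back non-zero (2 indicates the flush failed). */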
void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	/* flush hdp cache so updates hit vram */
	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
	    !(rdev->flags & RADEON_IS_AGP)) {
		void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
		u32 tmp;

		/* r7xx hw bug: write to HDP_DEBUG1 followed by an fb read
		 * rather than writing to HDP_REG_COHERENCY_FLUSH_CNTL.
		 * The new method seems to cause problems on some AGP cards,
		 * so just use the old method for them.
		 */
		WREG32(HDP_DEBUG1, 0);
		tmp = readl((void __iomem *)ptr);
	} else
		WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
		if (tmp == 2) {
			printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
			return;
		}
		if (tmp) {
			return;
		}
		udelay(1);
	}
}
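
/* Each GART page-table entry is 8 bytes (a 64-bit PTE), hence the
 * num_gpu_pages * 8 table size below. */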
int r600_pcie_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.table.vram.robj) {
		WARN(1, "R600 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r)
		return r;
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
	return radeon_gart_table_vram_alloc(rdev);
}
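
/* Bring the GART up: pin the page table in VRAM, program the L2 cache
 * and L1 TLBs, point VM context 0 at the GTT range with the dummy page
 * as its fault target, then flush so the new mappings take effect. */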
int r600_pcie_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r, i;

	if (rdev->gart.table.vram.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(rdev->dummy_page.addr >> 12));
	for (i = 1; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	r600_pcie_gart_tlb_flush(rdev);
	rdev->gart.ready = true;
	return 0;
}

void r600_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;
	int i, r;

	/* Disable all tables */
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	/* Disable L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup L1 TLB control */
	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	if (rdev->gart.table.vram.robj) {
		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(rdev->gart.table.vram.robj);
			radeon_bo_unpin(rdev->gart.table.vram.robj);
			radeon_bo_unreserve(rdev->gart.table.vram.robj);
		}
	}
}

void r600_pcie_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	r600_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
}

void r600_agp_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
}

int r600_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(R_000E50_SRBM_STATUS) & 0x3F00;
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -1;
}
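
/* Reprogramming the memory controller has to happen with the MC
 * quiesced: stop it, wait for idle, lock out the VGA aperture, then
 * rewrite the system aperture, FB location and AGP window before
 * resuming. */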
static void r600_mc_program(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Lockout access through VGA aperture (doesn't exist before R600) */
	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	if (rdev->flags & RADEON_IS_AGP) {
		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
			/* VRAM before AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.vram_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.gtt_end >> 12);
		} else {
			/* VRAM after AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.gtt_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.vram_end >> 12);
		}
	} else {
		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
	}
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
	} else {
		WREG32(MC_VM_AGP_BASE, 0);
		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	}
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	rv515_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it overwriting our objects */
	rv515_vga_render_disable(rdev);
}

/**
 * r600_vram_gtt_location - try to find VRAM & GTT location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * This function tries to place VRAM at the same address it occupies in
 * the CPU (PCI) address space, as some GPUs misbehave when VRAM is
 * reprogrammed to a different address.
 *
 * If there is not enough space to fit the non-visible VRAM after the
 * aperture then we limit the VRAM size to the aperture.
 *
 * If we are using AGP then we place VRAM adjacent to the AGP aperture, as
 * we need them to be contiguous from the GPU's point of view so that we
 * can program the GPU to catch accesses outside them (weird GPU policy,
 * see ??).
 *
 * This function never fails; the worst case is limiting VRAM or GTT size.
 *
 * Note: GTT start, end and size should be initialized before calling this
 * function on AGP platforms.
 */
static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_bf, size_af;

	if (mc->mc_vram_size > 0xE0000000) {
		/* leave room for at least 512M GTT */
		dev_warn(rdev->dev, "limiting VRAM\n");
		mc->real_vram_size = 0xE0000000;
		mc->mc_vram_size = 0xE0000000;
	}
	if (rdev->flags & RADEON_IS_AGP) {
		size_bf = mc->gtt_start;
		size_af = 0xFFFFFFFF - mc->gtt_end + 1;
		if (size_bf > size_af) {
			if (mc->mc_vram_size > size_bf) {
				dev_warn(rdev->dev, "limiting VRAM\n");
				mc->real_vram_size = size_bf;
				mc->mc_vram_size = size_bf;
			}
			mc->vram_start = mc->gtt_start - mc->mc_vram_size;
		} else {
			if (mc->mc_vram_size > size_af) {
				dev_warn(rdev->dev, "limiting VRAM\n");
				mc->real_vram_size = size_af;
				mc->mc_vram_size = size_af;
			}
			mc->vram_start = mc->gtt_end;
		}
		mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
		dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
				mc->mc_vram_size >> 20, mc->vram_start,
				mc->vram_end, mc->real_vram_size >> 20);
	} else {
		u64 base = 0;
		if (rdev->flags & RADEON_IS_IGP) {
			base = RREG32(MC_VM_FB_LOCATION) & 0xFFFF;
			base <<= 24;
		}
		radeon_vram_location(rdev, &rdev->mc, base);
		rdev->mc.gtt_base_align = 0;
		radeon_gtt_location(rdev, mc);
	}
}
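
/* RAMCFG gives the width of one memory channel (16/32/64 bits) and CHMAP
 * the number of channels (1/2/4/8); the bus width reported below is
 * simply channels * channel-size. */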
int r600_mc_init(struct radeon_device *rdev)
{
	u32 tmp;
	int chansize, numchan;

	/* Get VRAM information */
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32(RAMCFG);
	if (tmp & CHANSIZE_OVERRIDE) {
		chansize = 16;
	} else if (tmp & CHANSIZE_MASK) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	tmp = RREG32(CHMAP);
	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	}
	rdev->mc.vram_width = numchan * chansize;
	/* Could aper size report 0 ? */
	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
	/* Setup GPU memory space */
	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.visible_vram_size = rdev->mc.aper_size;
	r600_vram_gtt_location(rdev, &rdev->mc);

	if (rdev->flags & RADEON_IS_IGP) {
		rs690_pm_info(rdev);
		rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
	}
	radeon_update_bandwidth_info(rdev);
	return 0;
}

/* We don't check whether the GPU really needs a reset; we simply do the
 * reset. It's up to the caller to determine if the GPU needs one. We
 * might add a helper function to check that.
 */
int r600_gpu_soft_reset(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) |
				S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) |
				S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) |
				S_008010_SH_BUSY(1) | S_008010_SPI03_BUSY(1) |
				S_008010_SMX_BUSY(1) | S_008010_SC_BUSY(1) |
				S_008010_PA_BUSY(1) | S_008010_DB03_BUSY(1) |
				S_008010_CR_BUSY(1) | S_008010_CB03_BUSY(1) |
				S_008010_GUI_ACTIVE(1);
	u32 grbm2_busy_mask = S_008014_SPI0_BUSY(1) | S_008014_SPI1_BUSY(1) |
			S_008014_SPI2_BUSY(1) | S_008014_SPI3_BUSY(1) |
			S_008014_TA0_BUSY(1) | S_008014_TA1_BUSY(1) |
			S_008014_TA2_BUSY(1) | S_008014_TA3_BUSY(1) |
			S_008014_DB0_BUSY(1) | S_008014_DB1_BUSY(1) |
			S_008014_DB2_BUSY(1) | S_008014_DB3_BUSY(1) |
			S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) |
			S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
	u32 tmp;

	if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
		return 0;

	dev_info(rdev->dev, "GPU softreset\n");
	dev_info(rdev->dev, "  R_008010_GRBM_STATUS=0x%08X\n",
		RREG32(R_008010_GRBM_STATUS));
	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2=0x%08X\n",
		RREG32(R_008014_GRBM_STATUS2));
	dev_info(rdev->dev, "  R_000E50_SRBM_STATUS=0x%08X\n",
		RREG32(R_000E50_SRBM_STATUS));
	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Disable CP parsing/prefetching */
	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
	/* Check if any of the rendering blocks is busy and reset it */
	if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
	    (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
		tmp = S_008020_SOFT_RESET_CR(1) |
			S_008020_SOFT_RESET_DB(1) |
			S_008020_SOFT_RESET_CB(1) |
			S_008020_SOFT_RESET_PA(1) |
			S_008020_SOFT_RESET_SC(1) |
			S_008020_SOFT_RESET_SMX(1) |
			S_008020_SOFT_RESET_SPI(1) |
			S_008020_SOFT_RESET_SX(1) |
			S_008020_SOFT_RESET_SH(1) |
			S_008020_SOFT_RESET_TC(1) |
			S_008020_SOFT_RESET_TA(1) |
			S_008020_SOFT_RESET_VC(1) |
			S_008020_SOFT_RESET_VGT(1);
		dev_info(rdev->dev, "  R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(R_008020_GRBM_SOFT_RESET, tmp);
		RREG32(R_008020_GRBM_SOFT_RESET);
		mdelay(15);
		WREG32(R_008020_GRBM_SOFT_RESET, 0);
	}
	/* Reset CP (we always reset CP) */
	tmp = S_008020_SOFT_RESET_CP(1);
	dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
	WREG32(R_008020_GRBM_SOFT_RESET, tmp);
	RREG32(R_008020_GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(R_008020_GRBM_SOFT_RESET, 0);
	/* Wait a little for things to settle down */
	mdelay(1);
	dev_info(rdev->dev, "  R_008010_GRBM_STATUS=0x%08X\n",
		RREG32(R_008010_GRBM_STATUS));
	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2=0x%08X\n",
		RREG32(R_008014_GRBM_STATUS2));
	dev_info(rdev->dev, "  R_000E50_SRBM_STATUS=0x%08X\n",
		RREG32(R_000E50_SRBM_STATUS));
	rv515_mc_resume(rdev, &save);
	return 0;
}
874
 
874
 
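/*
 * r600_gpu_is_lockup() - check whether the CP is making progress. While
 * GUI_ACTIVE is clear the GPU is idle and cannot be locked up; otherwise
 * two PACKET2 NOPs are committed to force CP activity and the read
 * pointer is re-sampled so r100_gpu_cp_is_lockup() can compare it with
 * the position recorded on the previous call.
 */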
bool r600_gpu_is_lockup(struct radeon_device *rdev)
{
	u32 srbm_status;
	u32 grbm_status;
	u32 grbm_status2;
	struct r100_gpu_lockup *lockup;
	int r;

	if (rdev->family >= CHIP_RV770)
		lockup = &rdev->config.rv770.lockup;
	else
		lockup = &rdev->config.r600.lockup;

	srbm_status = RREG32(R_000E50_SRBM_STATUS);
	grbm_status = RREG32(R_008010_GRBM_STATUS);
	grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
	if (!G_008010_GUI_ACTIVE(grbm_status)) {
		r100_gpu_lockup_update(lockup, &rdev->cp);
		return false;
	}
	/* force CP activities */
	r = radeon_ring_lock(rdev, 2);
	if (!r) {
		/* PACKET2 NOP */
		radeon_ring_write(rdev, 0x80000000);
		radeon_ring_write(rdev, 0x80000000);
		radeon_ring_unlock_commit(rdev);
	}
	rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
	return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
}

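/* r600_asic_reset() - ASIC reset entry point; on r600-class hardware this
 * is just the soft reset sequence above. */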
int r600_asic_reset(struct radeon_device *rdev)
{
	return r600_gpu_soft_reset(rdev);
}

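/*
 * r600_get_tile_pipe_to_backend_map() - build the pipe-to-backend routing
 * map for GB_TILING_CONFIG. Each tile pipe gets a 2-bit field selecting
 * the render backend that serves it; enabled backends are assigned
 * round-robin over the swizzled pipe order, skipping any backend in
 * backend_disable_mask. With 4 pipes and 2 enabled backends, for
 * example, pipes 0-3 map to backends 0,1,0,1.
 */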
static u32 r600_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
					     u32 num_backends,
					     u32 backend_disable_mask)
{
	u32 backend_map = 0;
	u32 enabled_backends_mask;
	u32 enabled_backends_count;
	u32 cur_pipe;
	u32 swizzle_pipe[R6XX_MAX_PIPES];
	u32 cur_backend;
	u32 i;

	if (num_tile_pipes > R6XX_MAX_PIPES)
		num_tile_pipes = R6XX_MAX_PIPES;
	if (num_tile_pipes < 1)
		num_tile_pipes = 1;
	if (num_backends > R6XX_MAX_BACKENDS)
		num_backends = R6XX_MAX_BACKENDS;
	if (num_backends < 1)
		num_backends = 1;

	enabled_backends_mask = 0;
	enabled_backends_count = 0;
	for (i = 0; i < R6XX_MAX_BACKENDS; ++i) {
		if (((backend_disable_mask >> i) & 1) == 0) {
			enabled_backends_mask |= (1 << i);
			++enabled_backends_count;
		}
		if (enabled_backends_count == num_backends)
			break;
	}

	if (enabled_backends_count == 0) {
		enabled_backends_mask = 1;
		enabled_backends_count = 1;
	}

	if (enabled_backends_count != num_backends)
		num_backends = enabled_backends_count;

	memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R6XX_MAX_PIPES);
	switch (num_tile_pipes) {
	case 1:
		swizzle_pipe[0] = 0;
		break;
	case 2:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		break;
	case 3:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		swizzle_pipe[2] = 2;
		break;
	case 4:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		swizzle_pipe[2] = 2;
		swizzle_pipe[3] = 3;
		break;
	case 5:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		swizzle_pipe[2] = 2;
		swizzle_pipe[3] = 3;
		swizzle_pipe[4] = 4;
		break;
	case 6:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 2;
		swizzle_pipe[2] = 4;
		swizzle_pipe[3] = 5;
		swizzle_pipe[4] = 1;
		swizzle_pipe[5] = 3;
		break;
	case 7:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 2;
		swizzle_pipe[2] = 4;
		swizzle_pipe[3] = 6;
		swizzle_pipe[4] = 1;
		swizzle_pipe[5] = 3;
		swizzle_pipe[6] = 5;
		break;
	case 8:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 2;
		swizzle_pipe[2] = 4;
		swizzle_pipe[3] = 6;
		swizzle_pipe[4] = 1;
		swizzle_pipe[5] = 3;
		swizzle_pipe[6] = 5;
		swizzle_pipe[7] = 7;
		break;
	}

	cur_backend = 0;
	for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
		while (((1 << cur_backend) & enabled_backends_mask) == 0)
			cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;

		backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2)));

		cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
	}

	return backend_map;
}

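/* r600_count_pipe_bits() - population count of the low 32 bits of @val
 * (equivalent to hweight32()); used to turn the pipe/backend disable
 * masks into counts of active units. */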
int r600_count_pipe_bits(uint32_t val)
{
	int i, ret = 0;

	for (i = 0; i < 32; i++) {
		ret += val & 1;
		val >>= 1;
	}
	return ret;
}

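/*
 * r600_gpu_init() - one-time engine setup: select the per-family
 * pipe/SIMD/backend/GPR limits, program the tiling configuration and
 * backend map, and load default SQ resource and render state values.
 */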
void r600_gpu_init(struct radeon_device *rdev)
{
	u32 tiling_config;
	u32 ramcfg;
	u32 backend_map;
	u32 cc_rb_backend_disable;
	u32 cc_gc_shader_pipe_config;
	u32 tmp;
	int i, j;
	u32 sq_config;
	u32 sq_gpr_resource_mgmt_1 = 0;
	u32 sq_gpr_resource_mgmt_2 = 0;
	u32 sq_thread_resource_mgmt = 0;
	u32 sq_stack_resource_mgmt_1 = 0;
	u32 sq_stack_resource_mgmt_2 = 0;

	/* FIXME: implement */
	switch (rdev->family) {
	case CHIP_R600:
		rdev->config.r600.max_pipes = 4;
		rdev->config.r600.max_tile_pipes = 8;
		rdev->config.r600.max_simds = 4;
		rdev->config.r600.max_backends = 4;
		rdev->config.r600.max_gprs = 256;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 256;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 16;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	case CHIP_RV630:
	case CHIP_RV635:
		rdev->config.r600.max_pipes = 2;
		rdev->config.r600.max_tile_pipes = 2;
		rdev->config.r600.max_simds = 3;
		rdev->config.r600.max_backends = 1;
		rdev->config.r600.max_gprs = 128;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 128;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 4;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		rdev->config.r600.max_pipes = 1;
		rdev->config.r600.max_tile_pipes = 1;
		rdev->config.r600.max_simds = 2;
		rdev->config.r600.max_backends = 1;
		rdev->config.r600.max_gprs = 128;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 128;
		rdev->config.r600.max_hw_contexts = 4;
		rdev->config.r600.max_gs_threads = 4;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 1;
		break;
	case CHIP_RV670:
		rdev->config.r600.max_pipes = 4;
		rdev->config.r600.max_tile_pipes = 4;
		rdev->config.r600.max_simds = 4;
		rdev->config.r600.max_backends = 4;
		rdev->config.r600.max_gprs = 192;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 256;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 16;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	default:
		break;
	}

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}

	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));

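	/* GB_TILING_CONFIG packs the tiling parameters derived from RAMCFG:
	 * PIPE_TILING is log2 of the tile pipe count, BANK_TILING and
	 * GROUP_SIZE come from the bank count and burst length, and
	 * ROW_TILING/SAMPLE_SPLIT are capped at 3. The low 16 bits are
	 * mirrored into the DCP and HDP tiling registers below. */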
	/* Setup tiling */
	tiling_config = 0;
	ramcfg = RREG32(RAMCFG);
	switch (rdev->config.r600.max_tile_pipes) {
	case 1:
		tiling_config |= PIPE_TILING(0);
		break;
	case 2:
		tiling_config |= PIPE_TILING(1);
		break;
	case 4:
		tiling_config |= PIPE_TILING(2);
		break;
	case 8:
		tiling_config |= PIPE_TILING(3);
		break;
	default:
		break;
	}
	rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes;
	rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
	tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
	tiling_config |= GROUP_SIZE((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
	if ((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT)
		rdev->config.r600.tiling_group_size = 512;
	else
		rdev->config.r600.tiling_group_size = 256;
	tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
	if (tmp > 3) {
		tiling_config |= ROW_TILING(3);
		tiling_config |= SAMPLE_SPLIT(3);
	} else {
		tiling_config |= ROW_TILING(tmp);
		tiling_config |= SAMPLE_SPLIT(tmp);
	}
	tiling_config |= BANK_SWAPS(1);

	cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
	cc_rb_backend_disable |=
		BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK);

	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
	cc_gc_shader_pipe_config |=
		INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK);
	cc_gc_shader_pipe_config |=
		INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK);

	backend_map = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes,
							(R6XX_MAX_BACKENDS -
							 r600_count_pipe_bits((cc_rb_backend_disable &
									       R6XX_MAX_BACKENDS_MASK) >> 16)),
							(cc_rb_backend_disable >> 16));
	rdev->config.r600.tile_config = tiling_config;
	tiling_config |= BACKEND_MAP(backend_map);
	WREG32(GB_TILING_CONFIG, tiling_config);
	WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
	WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);

	/* Setup pipes */
	WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
	WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
	WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);

	tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
	WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
	WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);

	/* Setup some CP states */
	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | ROQ_IB2_START(0x2b)));
	WREG32(CP_MEQ_THRESHOLDS, (MEQ_END(0x40) | ROQ_END(0x40)));

	WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | SYNC_GRADIENT |
			     SYNC_WALKER | SYNC_ALIGNER));
	/* Setup various GPU states */
	if (rdev->family == CHIP_RV670)
		WREG32(ARB_GDEC_RD_CNTL, 0x00000021);

	tmp = RREG32(SX_DEBUG_1);
	tmp |= SMX_EVENT_RELEASE;
	if ((rdev->family > CHIP_R600))
		tmp |= ENABLE_NEW_SMX_ADDRESS;
	WREG32(SX_DEBUG_1, tmp);

	if (((rdev->family) == CHIP_R600) ||
	    ((rdev->family) == CHIP_RV630) ||
	    ((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780) ||
	    ((rdev->family) == CHIP_RS880)) {
		WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE);
	} else {
		WREG32(DB_DEBUG, 0);
	}
	WREG32(DB_WATERMARKS, (DEPTH_FREE(4) | DEPTH_CACHELINE_FREE(16) |
			       DEPTH_FLUSH(16) | DEPTH_PENDING_FREE(4)));

	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
	WREG32(VGT_NUM_INSTANCES, 0);

	WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(0));

	tmp = RREG32(SQ_MS_FIFO_SIZES);
	if (((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780) ||
	    ((rdev->family) == CHIP_RS880)) {
		tmp = (CACHE_FIFO_SIZE(0xa) |
		       FETCH_FIFO_HIWATER(0xa) |
		       DONE_FIFO_HIWATER(0xe0) |
		       ALU_UPDATE_FIFO_HIWATER(0x8));
	} else if (((rdev->family) == CHIP_R600) ||
		   ((rdev->family) == CHIP_RV630)) {
		tmp &= ~DONE_FIFO_HIWATER(0xff);
		tmp |= DONE_FIFO_HIWATER(0x4);
	}
	WREG32(SQ_MS_FIFO_SIZES, tmp);

	/* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
	 * should be adjusted as needed by the 2D/3D drivers.  This just sets default values
	 */
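	/* Note: on CHIP_R600 the default split below accounts for the whole
	 * 256-GPR pool (124 PS + 124 VS + 2 * 4 clause temporaries); the
	 * clause temps appear to be reserved once for PS and once for VS. */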
	sq_config = RREG32(SQ_CONFIG);
	sq_config &= ~(PS_PRIO(3) |
		       VS_PRIO(3) |
		       GS_PRIO(3) |
		       ES_PRIO(3));
	sq_config |= (DX9_CONSTS |
		      VC_ENABLE |
		      PS_PRIO(0) |
		      VS_PRIO(1) |
		      GS_PRIO(2) |
		      ES_PRIO(3));

	if ((rdev->family) == CHIP_R600) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(124) |
					  NUM_VS_GPRS(124) |
					  NUM_CLAUSE_TEMP_GPRS(4));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(0) |
					  NUM_ES_GPRS(0));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(136) |
					   NUM_VS_THREADS(48) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(4));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(128) |
					    NUM_VS_STACK_ENTRIES(128));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(0) |
					    NUM_ES_STACK_ENTRIES(0));
	} else if (((rdev->family) == CHIP_RV610) ||
		   ((rdev->family) == CHIP_RV620) ||
		   ((rdev->family) == CHIP_RS780) ||
		   ((rdev->family) == CHIP_RS880)) {
		/* no vertex cache */
		sq_config &= ~VC_ENABLE;

		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
					  NUM_ES_GPRS(17));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
					    NUM_VS_STACK_ENTRIES(40));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
					    NUM_ES_STACK_ENTRIES(16));
	} else if (((rdev->family) == CHIP_RV630) ||
		   ((rdev->family) == CHIP_RV635)) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(18) |
					  NUM_ES_GPRS(18));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
					    NUM_VS_STACK_ENTRIES(40));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
					    NUM_ES_STACK_ENTRIES(16));
	} else if ((rdev->family) == CHIP_RV670) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
					  NUM_ES_GPRS(17));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(64) |
					    NUM_VS_STACK_ENTRIES(64));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(64) |
					    NUM_ES_STACK_ENTRIES(64));
	}

	WREG32(SQ_CONFIG, sq_config);
	WREG32(SQ_GPR_RESOURCE_MGMT_1,  sq_gpr_resource_mgmt_1);
	WREG32(SQ_GPR_RESOURCE_MGMT_2,  sq_gpr_resource_mgmt_2);
	WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
	WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
	WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);

	if (((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780) ||
	    ((rdev->family) == CHIP_RS880)) {
		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY));
	} else {
		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC));
	}

	/* More default values. 2D/3D driver should adjust as needed */
	WREG32(PA_SC_AA_SAMPLE_LOCS_2S, (S0_X(0xc) | S0_Y(0x4) |
					 S1_X(0x4) | S1_Y(0xc)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_4S, (S0_X(0xe) | S0_Y(0xe) |
					 S1_X(0x2) | S1_Y(0x2) |
					 S2_X(0xa) | S2_Y(0x6) |
					 S3_X(0x6) | S3_Y(0xa)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD0, (S0_X(0xe) | S0_Y(0xb) |
					     S1_X(0x4) | S1_Y(0xc) |
					     S2_X(0x1) | S2_Y(0x6) |
					     S3_X(0xa) | S3_Y(0xe)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD1, (S4_X(0x6) | S4_Y(0x1) |
					     S5_X(0x0) | S5_Y(0x0) |
					     S6_X(0xb) | S6_Y(0x4) |
					     S7_X(0x7) | S7_Y(0x8)));

	WREG32(VGT_STRMOUT_EN, 0);
	tmp = rdev->config.r600.max_pipes * 16;
	switch (rdev->family) {
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		tmp += 32;
		break;
	case CHIP_RV670:
		tmp += 128;
		break;
	default:
		break;
	}
	if (tmp > 256) {
		tmp = 256;
	}
	WREG32(VGT_ES_PER_GS, 128);
	WREG32(VGT_GS_PER_ES, tmp);
	WREG32(VGT_GS_PER_VS, 2);
	WREG32(VGT_GS_VERTEX_REUSE, 16);

	/* more default values. 2D/3D driver should adjust as needed */
	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
	WREG32(VGT_STRMOUT_EN, 0);
	WREG32(SX_MISC, 0);
	WREG32(PA_SC_MODE_CNTL, 0);
	WREG32(PA_SC_AA_CONFIG, 0);
	WREG32(PA_SC_LINE_STIPPLE, 0);
	WREG32(SPI_INPUT_Z, 0);
	WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
	WREG32(CB_COLOR7_FRAG, 0);

	/* Clear render buffer base addresses */
	WREG32(CB_COLOR0_BASE, 0);
	WREG32(CB_COLOR1_BASE, 0);
	WREG32(CB_COLOR2_BASE, 0);
	WREG32(CB_COLOR3_BASE, 0);
	WREG32(CB_COLOR4_BASE, 0);
	WREG32(CB_COLOR5_BASE, 0);
	WREG32(CB_COLOR6_BASE, 0);
	WREG32(CB_COLOR7_BASE, 0);
	WREG32(CB_COLOR7_FRAG, 0);

	switch (rdev->family) {
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		tmp = TC_L2_SIZE(8);
		break;
	case CHIP_RV630:
	case CHIP_RV635:
		tmp = TC_L2_SIZE(4);
		break;
	case CHIP_R600:
		tmp = TC_L2_SIZE(0) | L2_DISABLE_LATE_HIT;
		break;
	default:
		tmp = TC_L2_SIZE(0);
		break;
	}
	WREG32(TC_CNTL, tmp);

	tmp = RREG32(HDP_HOST_PATH_CNTL);
	WREG32(HDP_HOST_PATH_CNTL, tmp);

	tmp = RREG32(ARB_POP);
	tmp |= ENABLE_TC128;
	WREG32(ARB_POP, tmp);

	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
	WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
			       NUM_CLIP_SEQ(3)));
	WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095));
}

/*
 * Indirect registers accessor
 */
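/* The PCIE port registers sit behind an index/data pair: write the
 * register offset to PCIE_PORT_INDEX, read it back to flush the posted
 * write, then access the value through PCIE_PORT_DATA. */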
u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
{
	u32 r;

	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
	(void)RREG32(PCIE_PORT_INDEX);
	r = RREG32(PCIE_PORT_DATA);
	return r;
}

void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
	(void)RREG32(PCIE_PORT_INDEX);
	WREG32(PCIE_PORT_DATA, (v));
	(void)RREG32(PCIE_PORT_DATA);
}

/*
 * CP & Ring
 */
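/* r600_cp_stop() - halt the CP micro engine and mask off scratch register
 * writeback; the upstream radeon_ttm_set_active_vram_size() call is
 * stubbed out in this port. */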
void r600_cp_stop(struct radeon_device *rdev)
{
//   radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
	WREG32(SCRATCH_UMSK, 0);
}

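/*
 * r600_init_microcode() - fetch the PFP, ME and RLC firmware images via
 * request_firmware() and sanity-check their lengths against the sizes
 * expected for the GPU generation; on failure everything already loaded
 * is released again.
 */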
int r600_init_microcode(struct radeon_device *rdev)
{
	struct platform_device *pdev;
	const char *chip_name;
	const char *rlc_chip_name;
	size_t pfp_req_size, me_req_size, rlc_req_size;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
	err = IS_ERR(pdev);
	if (err) {
		printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
		return -EINVAL;
	}

	switch (rdev->family) {
	case CHIP_R600:
		chip_name = "R600";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV610:
		chip_name = "RV610";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV630:
		chip_name = "RV630";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV620:
		chip_name = "RV620";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV635:
		chip_name = "RV635";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV670:
		chip_name = "RV670";
		rlc_chip_name = "R600";
		break;
	case CHIP_RS780:
	case CHIP_RS880:
		chip_name = "RS780";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV770:
		chip_name = "RV770";
		rlc_chip_name = "R700";
		break;
	case CHIP_RV730:
	case CHIP_RV740:
		chip_name = "RV730";
		rlc_chip_name = "R700";
		break;
	case CHIP_RV710:
		chip_name = "RV710";
		rlc_chip_name = "R700";
		break;
	case CHIP_CEDAR:
		chip_name = "CEDAR";
		rlc_chip_name = "CEDAR";
		break;
	case CHIP_REDWOOD:
		chip_name = "REDWOOD";
		rlc_chip_name = "REDWOOD";
		break;
	case CHIP_JUNIPER:
		chip_name = "JUNIPER";
		rlc_chip_name = "JUNIPER";
		break;
	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
		chip_name = "CYPRESS";
		rlc_chip_name = "CYPRESS";
		break;
	case CHIP_PALM:
		chip_name = "PALM";
		rlc_chip_name = "SUMO";
		break;
	case CHIP_SUMO:
		chip_name = "SUMO";
		rlc_chip_name = "SUMO";
		break;
	case CHIP_SUMO2:
		chip_name = "SUMO2";
		rlc_chip_name = "SUMO";
		break;
	default: BUG();
	}

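	/* Expected image sizes in bytes per generation; note that the R600
	 * ME image carries 12 bytes per PM4 ucode entry rather than 4. */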
	if (rdev->family >= CHIP_CEDAR) {
		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
	} else if (rdev->family >= CHIP_RV770) {
		pfp_req_size = R700_PFP_UCODE_SIZE * 4;
		me_req_size = R700_PM4_UCODE_SIZE * 4;
		rlc_req_size = R700_RLC_UCODE_SIZE * 4;
	} else {
		pfp_req_size = PFP_UCODE_SIZE * 4;
		me_req_size = PM4_UCODE_SIZE * 12;
		rlc_req_size = RLC_UCODE_SIZE * 4;
	}

	DRM_INFO("Loading %s Microcode\n", chip_name);

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
	err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->pfp_fw->size != pfp_req_size) {
		printk(KERN_ERR
		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->pfp_fw->size, fw_name);
		err = -EINVAL;
		goto out;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
	err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->me_fw->size != me_req_size) {
		printk(KERN_ERR
		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->me_fw->size, fw_name);
		err = -EINVAL;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
	err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->rlc_fw->size != rlc_req_size) {
		printk(KERN_ERR
		       "r600_rlc: Bogus length %zu in firmware \"%s\"\n",
		       rdev->rlc_fw->size, fw_name);
		err = -EINVAL;
	}

out:
	platform_device_unregister(pdev);

	if (err) {
		if (err != -EINVAL)
			printk(KERN_ERR
			       "r600_cp: Failed to load firmware \"%s\"\n",
			       fw_name);
		release_firmware(rdev->pfp_fw);
		rdev->pfp_fw = NULL;
		release_firmware(rdev->me_fw);
		rdev->me_fw = NULL;
		release_firmware(rdev->rlc_fw);
		rdev->rlc_fw = NULL;
	}
	return err;
}

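/*
 * r600_cp_load_microcode() - upload the ME and PFP images into the CP's
 * internal RAM through the RAM/UCODE address and data ports. The images
 * are stored big-endian, hence the be32_to_cpup() conversion per word.
 */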
static int r600_cp_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i;

	if (!rdev->me_fw || !rdev->pfp_fw)
		return -EINVAL;

	r600_cp_stop(rdev);

	WREG32(CP_RB_CNTL,
#ifdef __BIG_ENDIAN
	       BUF_SWAP_32BIT |
#endif
	       RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));

	/* Reset cp */
	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);

	WREG32(CP_ME_RAM_WADDR, 0);

	fw_data = (const __be32 *)rdev->me_fw->data;
	WREG32(CP_ME_RAM_WADDR, 0);
	for (i = 0; i < PM4_UCODE_SIZE * 3; i++)
		WREG32(CP_ME_RAM_DATA,
		       be32_to_cpup(fw_data++));

	fw_data = (const __be32 *)rdev->pfp_fw->data;
	WREG32(CP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < PFP_UCODE_SIZE; i++)
		WREG32(CP_PFP_UCODE_DATA,
		       be32_to_cpup(fw_data++));

	WREG32(CP_PFP_UCODE_ADDR, 0);
	WREG32(CP_ME_RAM_WADDR, 0);
	WREG32(CP_ME_RAM_RADDR, 0);
	return 0;
}

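/*
 * r600_cp_start() - bring the CP micro engine up: submit a
 * PACKET3_ME_INITIALIZE packet describing how many hardware contexts the
 * ME may use, then write CP_ME_CNTL with the halt bit clear so the
 * engine starts fetching.
 */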
int r600_cp_start(struct radeon_device *rdev)
{
	int r;
	uint32_t cp_me;

	r = radeon_ring_lock(rdev, 7);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}
	radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
	radeon_ring_write(rdev, 0x1);
	if (rdev->family >= CHIP_RV770) {
		radeon_ring_write(rdev, 0x0);
		radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
	} else {
		radeon_ring_write(rdev, 0x3);
		radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
	}
	radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, 0);
	radeon_ring_unlock_commit(rdev);

	cp_me = 0xff;
	WREG32(R_0086D8_CP_ME_CNTL, cp_me);
	return 0;
}

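/*
 * r600_cp_resume() - (re)start the ring buffer: reset the CP, program the
 * ring size, read/write pointers and writeback addresses, restart the
 * micro engine and run a ring test before marking the CP ready.
 */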
1701
int r600_cp_resume(struct radeon_device *rdev)
1701
int r600_cp_resume(struct radeon_device *rdev)
1702
{
1702
{
1703
	u32 tmp;
1703
	u32 tmp;
1704
	u32 rb_bufsz;
1704
	u32 rb_bufsz;
1705
	int r;
1705
	int r;
1706
 
1706
 
1707
	/* Reset cp */
1707
	/* Reset cp */
1708
	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
1708
	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
1709
	RREG32(GRBM_SOFT_RESET);
1709
	RREG32(GRBM_SOFT_RESET);
1710
	mdelay(15);
1710
	mdelay(15);
1711
	WREG32(GRBM_SOFT_RESET, 0);
1711
	WREG32(GRBM_SOFT_RESET, 0);
1712
 
1712
 
1713
	/* Set ring buffer size */
1713
	/* Set ring buffer size */
1714
	rb_bufsz = drm_order(rdev->cp.ring_size / 8);
1714
	rb_bufsz = drm_order(rdev->cp.ring_size / 8);
1715
	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
1715
	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
1716
#ifdef __BIG_ENDIAN
1716
#ifdef __BIG_ENDIAN
1717
	tmp |= BUF_SWAP_32BIT;
1717
	tmp |= BUF_SWAP_32BIT;
1718
#endif
1718
#endif
1719
	WREG32(CP_RB_CNTL, tmp);
1719
	WREG32(CP_RB_CNTL, tmp);
1720
	WREG32(CP_SEM_WAIT_TIMER, 0x4);
1720
	WREG32(CP_SEM_WAIT_TIMER, 0x4);
1721
 
1721
 
1722
	/* Set the write pointer delay */
1722
	/* Set the write pointer delay */
1723
	WREG32(CP_RB_WPTR_DELAY, 0);
1723
	WREG32(CP_RB_WPTR_DELAY, 0);
1724
 
1724
 
1725
	/* Initialize the ring buffer's read and write pointers */
1725
	/* Initialize the ring buffer's read and write pointers */
1726
	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
1726
	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
1727
	WREG32(CP_RB_RPTR_WR, 0);
1727
	WREG32(CP_RB_RPTR_WR, 0);
1728
	WREG32(CP_RB_WPTR, 0);
1728
	WREG32(CP_RB_WPTR, 0);
1729
 
1729
 
1730
	/* set the wb address whether it's enabled or not */
1730
	/* set the wb address whether it's enabled or not */
1731
	WREG32(CP_RB_RPTR_ADDR,
1731
	WREG32(CP_RB_RPTR_ADDR,
1732
#ifdef __BIG_ENDIAN
1732
#ifdef __BIG_ENDIAN
1733
	       RB_RPTR_SWAP(2) |
1733
	       RB_RPTR_SWAP(2) |
1734
#endif
1734
#endif
1735
	       ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
1735
	       ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
1736
	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
1736
	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
1737
	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
1737
	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
1738
 
1738
 
1739
	if (rdev->wb.enabled)
1739
	if (rdev->wb.enabled)
1740
		WREG32(SCRATCH_UMSK, 0xff);
1740
		WREG32(SCRATCH_UMSK, 0xff);
1741
	else {
1741
	else {
1742
		tmp |= RB_NO_UPDATE;
1742
		tmp |= RB_NO_UPDATE;
1743
		WREG32(SCRATCH_UMSK, 0);
1743
		WREG32(SCRATCH_UMSK, 0);
1744
	}
1744
	}
1745
 
1745
 
1746
	mdelay(1);
1746
	mdelay(1);
1747
	WREG32(CP_RB_CNTL, tmp);
1747
	WREG32(CP_RB_CNTL, tmp);
1748
 
1748
 
1749
	WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
1749
	WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
1750
	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
1750
	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
1751
 
1751
 
1752
	rdev->cp.rptr = RREG32(CP_RB_RPTR);
1752
	rdev->cp.rptr = RREG32(CP_RB_RPTR);
1753
	rdev->cp.wptr = RREG32(CP_RB_WPTR);
1753
	rdev->cp.wptr = RREG32(CP_RB_WPTR);
1754
 
1754
 
1755
	r600_cp_start(rdev);
1755
	r600_cp_start(rdev);
1756
	rdev->cp.ready = true;
1756
	rdev->cp.ready = true;
1757
	r = radeon_ring_test(rdev);
1757
	r = radeon_ring_test(rdev);
1758
	if (r) {
1758
	if (r) {
1759
		rdev->cp.ready = false;
1759
		rdev->cp.ready = false;
1760
		return r;
1760
		return r;
1761
	}
1761
	}
1762
	return 0;
1762
	return 0;
1763
}
1763
}
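
/* A worked example of the CP_RB_CNTL value programmed above (illustrative
 * sketch, assuming the usual 4 KB RADEON_GPU_PAGE_SIZE and the 1 MB ring
 * set up in r600_init): rb_bufsz = drm_order(1048576 / 8) = 17 and the
 * block size field is drm_order(4096 / 8) = 9, so tmp = (9 << 8) | 17
 * before the big-endian swap bit is OR'd in.
 */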

void r600_cp_commit(struct radeon_device *rdev)
{
	WREG32(CP_RB_WPTR, rdev->cp.wptr);
	(void)RREG32(CP_RB_WPTR);
}

void r600_ring_init(struct radeon_device *rdev, unsigned ring_size)
{
	u32 rb_bufsz;

	/* Align ring size */
	rb_bufsz = drm_order(ring_size / 8);
	ring_size = (1 << (rb_bufsz + 1)) * 4;
	rdev->cp.ring_size = ring_size;
	rdev->cp.align_mask = 16 - 1;
}
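
/* A worked example of the alignment above (illustrative only): for the
 * 1 MB ring requested by r600_init, ring_size / 8 = 131072 = 2^17, so
 * rb_bufsz = 17 and (1 << 18) * 4 = 1048576 bytes comes back out.  A
 * non-power-of-two request such as 1000000 bytes rounds up the same way:
 * drm_order(125000) = 17, again giving a 1 MB ring.
 */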

void r600_cp_fini(struct radeon_device *rdev)
{
	r600_cp_stop(rdev);
	radeon_ring_fini(rdev);
}


/*
 * GPU scratch register helper functions.
 */
void r600_scratch_init(struct radeon_device *rdev)
{
	int i;

	rdev->scratch.num_reg = 7;
	rdev->scratch.reg_base = SCRATCH_REG0;
	for (i = 0; i < rdev->scratch.num_reg; i++) {
		rdev->scratch.free[i] = true;
		rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
	}
}
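
/* The registers set up here are handed out one at a time by
 * radeon_scratch_get() and returned with radeon_scratch_free();
 * r600_ring_test() below is a typical user, seeding one with 0xCAFEDEAD
 * and polling for the value written through the ring.
 */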

int r600_ring_test(struct radeon_device *rdev)
{
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = radeon_scratch_get(rdev, &scratch);
	if (r) {
		DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	r = radeon_ring_lock(rdev, 3);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		radeon_scratch_free(rdev, scratch);
		return r;
	}
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(rdev, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
	radeon_ring_write(rdev, 0xDEADBEEF);
	radeon_ring_unlock_commit(rdev);
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ring test succeeded in %d usecs\n", i);
	} else {
		DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}
	radeon_scratch_free(rdev, scratch);
	return r;
}
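
/* The test above works because SET_CONFIG_REG packets are executed by the
 * CP itself: the scratch register is seeded with 0xCAFEDEAD through MMIO,
 * and only a CP that actually fetched and ran the ring contents can
 * replace it with 0xDEADBEEF.
 */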

void r600_fence_ring_emit(struct radeon_device *rdev,
			  struct radeon_fence *fence)
{
	if (rdev->wb.use_event) {
		u64 addr = rdev->wb.gpu_addr + R600_WB_EVENT_OFFSET +
			(u64)(rdev->fence_drv.scratch_reg - rdev->scratch.reg_base);
		/* EVENT_WRITE_EOP - flush caches, send int */
		radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
		radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
		radeon_ring_write(rdev, addr & 0xffffffff);
		radeon_ring_write(rdev, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
		radeon_ring_write(rdev, fence->seq);
		radeon_ring_write(rdev, 0);
	} else {
		radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
		radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0));
		/* wait for 3D idle clean */
		radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
		radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
		/* Emit fence sequence & fire IRQ */
		radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
		radeon_ring_write(rdev, fence->seq);
		/* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
		radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
		radeon_ring_write(rdev, RB_INT_STAT);
	}
}
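
/* For reference, the five dwords following the EVENT_WRITE_EOP header
 * above are: the event type, the low 32 bits of the write-back address,
 * the high address bits combined with the DATA_SEL/INT_SEL fields, and
 * the fence value split across the last two dwords (sequence number in
 * the low dword, zero in the high one).
 */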


int r600_set_surface_reg(struct radeon_device *rdev, int reg,
			 uint32_t tiling_flags, uint32_t pitch,
			 uint32_t offset, uint32_t obj_size)
{
	/* FIXME: implement */
	return 0;
}

void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
{
	/* FIXME: implement */
}

int r600_startup(struct radeon_device *rdev)
{
	int r;

	/* enable pcie gen2 link */
	r600_pcie_gen2_enable(rdev);

	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
		r = r600_init_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load firmware!\n");
			return r;
		}
	}

	r600_mc_program(rdev);
	if (rdev->flags & RADEON_IS_AGP) {
		r600_agp_enable(rdev);
	} else {
		r = r600_pcie_gart_enable(rdev);
		if (r)
			return r;
	}
	r600_gpu_init(rdev);

	r = radeon_ring_init(rdev, rdev->cp.ring_size);
	if (r)
		return r;
	r = r600_cp_load_microcode(rdev);
	if (r)
		return r;
	r = r600_cp_resume(rdev);
	if (r)
		return r;

	return 0;
}

void r600_vga_set_state(struct radeon_device *rdev, bool state)
{
	uint32_t temp;

	temp = RREG32(CONFIG_CNTL);
	if (state == false) {
		temp &= ~(1<<0);
		temp |= (1<<1);
	} else {
		temp &= ~(1<<1);
	}
	WREG32(CONFIG_CNTL, temp);
}


/* Plan is to move initialization into that function and use
 * helper functions so that radeon_device_init pretty much
 * does nothing more than call asic-specific functions. This
 * should also allow us to remove a bunch of callback functions
 * like vram_info.
 */
int r600_init(struct radeon_device *rdev)
{
	int r;

	if (r600_debugfs_mc_info_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for mc !\n");
	}
	/* This doesn't do much */
	r = radeon_gem_init(rdev);
	if (r)
		return r;
	/* Read BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	/* Must be an ATOMBIOS */
	if (!rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
		return -EINVAL;
	}
	r = radeon_atombios_init(rdev);
	if (r)
		return r;
	/* Post card if necessary */
	if (!radeon_card_posted(rdev)) {
		if (!rdev->bios) {
			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
			return -EINVAL;
		}
		DRM_INFO("GPU not posted. posting now...\n");
		atom_asic_init(rdev->mode_info.atom_context);
	}
	/* Initialize scratch registers */
	r600_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r)
			radeon_agp_disable(rdev);
	}
	r = r600_mc_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;

	r = radeon_irq_kms_init(rdev);
	if (r)
		return r;

	rdev->cp.ring_obj = NULL;
	r600_ring_init(rdev, 1024 * 1024);

	rdev->ih.ring_obj = NULL;
	r600_ih_ring_init(rdev, 64 * 1024);

	r = r600_pcie_gart_init(rdev);
	if (r)
		return r;

	rdev->accel_working = true;
	r = r600_startup(rdev);
	if (r) {
		dev_err(rdev->dev, "disabling GPU acceleration\n");
//		r600_suspend(rdev);
//		r600_wb_fini(rdev);
//		radeon_ring_fini(rdev);
		r600_pcie_gart_fini(rdev);
		rdev->accel_working = false;
	}
	if (rdev->accel_working) {
//		r = radeon_ib_pool_init(rdev);
//		if (r) {
//			DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
//			rdev->accel_working = false;
//		}
//		r = r600_ib_test(rdev);
//		if (r) {
//			DRM_ERROR("radeon: failed testing IB (%d).\n", r);
//			rdev->accel_working = false;
//		}
	}

	if (r)
		return r; /* TODO error handling */
	return 0;
}

/*
 * CS stuff
 */
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	/* FIXME: implement */
	radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	radeon_ring_write(rdev,
#ifdef __BIG_ENDIAN
			  (2 << 0) |
#endif
			  (ib->gpu_addr & 0xFFFFFFFC));
	radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
	radeon_ring_write(rdev, ib->length_dw);
}

int r600_ib_test(struct radeon_device *rdev)
{
	struct radeon_ib *ib;
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = radeon_scratch_get(rdev, &scratch);
	if (r) {
		DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	r = radeon_ib_get(rdev, &ib);
	if (r) {
		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
		return r;
	}
	ib->ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
	ib->ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
	ib->ptr[2] = 0xDEADBEEF;
	ib->ptr[3] = PACKET2(0);
	ib->ptr[4] = PACKET2(0);
	ib->ptr[5] = PACKET2(0);
	ib->ptr[6] = PACKET2(0);
	ib->ptr[7] = PACKET2(0);
	ib->ptr[8] = PACKET2(0);
	ib->ptr[9] = PACKET2(0);
	ib->ptr[10] = PACKET2(0);
	ib->ptr[11] = PACKET2(0);
	ib->ptr[12] = PACKET2(0);
	ib->ptr[13] = PACKET2(0);
	ib->ptr[14] = PACKET2(0);
	ib->ptr[15] = PACKET2(0);
	ib->length_dw = 16;
	r = radeon_ib_schedule(rdev, ib);
	if (r) {
		radeon_scratch_free(rdev, scratch);
		radeon_ib_free(rdev, &ib);
		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
		return r;
	}
	r = radeon_fence_wait(ib->fence, false);
	if (r) {
		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
		return r;
	}
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ib test succeeded in %u usecs\n", i);
	} else {
		DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}
	radeon_scratch_free(rdev, scratch);
	radeon_ib_free(rdev, &ib);
	return r;
}

/*
 * Interrupts
 *
 * Interrupts use a ring buffer on r6xx/r7xx hardware.  It works pretty
 * much the same as the CP ring buffer, but in reverse.  Rather than the CPU
 * writing to the ring and the GPU consuming, the GPU writes to the ring
 * and host consumes.  As the host irq handler processes interrupts, it
 * increments the rptr.  When the rptr catches up with the wptr, all the
 * current interrupts have been processed.
 */
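
/* A minimal sketch of the consume pattern described above (illustrative
 * only; process_vector is a placeholder, the real loop lives in
 * r600_irq_process below).  Each vector is 16 bytes and the read pointer
 * wraps with the power-of-two ring mask:
 *
 *	u32 rptr = rdev->ih.rptr;
 *	u32 wptr = r600_get_ih_wptr(rdev);
 *	while (rptr != wptr) {
 *		process_vector(&rdev->ih.ring[rptr / 4]);
 *		rptr = (rptr + 16) & rdev->ih.ptr_mask;
 *	}
 *	rdev->ih.rptr = rptr;
 *	WREG32(IH_RB_RPTR, rptr);
 */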

void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
{
	u32 rb_bufsz;

	/* Align ring size */
	rb_bufsz = drm_order(ring_size / 4);
	ring_size = (1 << rb_bufsz) * 4;
	rdev->ih.ring_size = ring_size;
	rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
	rdev->ih.rptr = 0;
}
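
/* Because (1 << rb_bufsz) * 4 is a power of two, ring_size - 1 is a valid
 * wrap mask: the 64 KB ring allocated in r600_init gives
 * ptr_mask = 0xffff, so (rptr + 16) & ptr_mask wraps cleanly at the end
 * of the buffer.
 */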

static int r600_ih_ring_alloc(struct radeon_device *rdev)
{
	int r;

	/* Allocate ring buffer */
	if (rdev->ih.ring_obj == NULL) {
		r = radeon_bo_create(rdev, rdev->ih.ring_size,
				     PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT,
				     &rdev->ih.ring_obj);
		if (r) {
			DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
			return r;
		}
		r = radeon_bo_reserve(rdev->ih.ring_obj, false);
		if (unlikely(r != 0))
			return r;
		r = radeon_bo_pin(rdev->ih.ring_obj,
				  RADEON_GEM_DOMAIN_GTT,
				  &rdev->ih.gpu_addr);
		if (r) {
			radeon_bo_unreserve(rdev->ih.ring_obj);
			DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r);
			return r;
		}
		r = radeon_bo_kmap(rdev->ih.ring_obj,
				   (void **)&rdev->ih.ring);
		radeon_bo_unreserve(rdev->ih.ring_obj);
		if (r) {
			DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r);
			return r;
		}
	}
	return 0;
}

static void r600_ih_ring_fini(struct radeon_device *rdev)
{
	int r;
	if (rdev->ih.ring_obj) {
		r = radeon_bo_reserve(rdev->ih.ring_obj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(rdev->ih.ring_obj);
			radeon_bo_unpin(rdev->ih.ring_obj);
			radeon_bo_unreserve(rdev->ih.ring_obj);
		}
		radeon_bo_unref(&rdev->ih.ring_obj);
		rdev->ih.ring = NULL;
		rdev->ih.ring_obj = NULL;
	}
}

void r600_rlc_stop(struct radeon_device *rdev)
{

	if ((rdev->family >= CHIP_RV770) &&
	    (rdev->family <= CHIP_RV740)) {
		/* r7xx asics need to soft reset RLC before halting */
		WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
		RREG32(SRBM_SOFT_RESET);
		udelay(15000);
		WREG32(SRBM_SOFT_RESET, 0);
		RREG32(SRBM_SOFT_RESET);
	}

	WREG32(RLC_CNTL, 0);
}

static void r600_rlc_start(struct radeon_device *rdev)
{
	WREG32(RLC_CNTL, RLC_ENABLE);
}

static int r600_rlc_init(struct radeon_device *rdev)
{
	u32 i;
	const __be32 *fw_data;

	if (!rdev->rlc_fw)
		return -EINVAL;

	r600_rlc_stop(rdev);

	WREG32(RLC_HB_BASE, 0);
	WREG32(RLC_HB_CNTL, 0);
	WREG32(RLC_HB_RPTR, 0);
	WREG32(RLC_HB_WPTR, 0);
	if (rdev->family <= CHIP_CAICOS) {
		WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
		WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
	}
	WREG32(RLC_MC_CNTL, 0);
	WREG32(RLC_UCODE_CNTL, 0);

	fw_data = (const __be32 *)rdev->rlc_fw->data;
	if (rdev->family >= CHIP_CAYMAN) {
		for (i = 0; i < CAYMAN_RLC_UCODE_SIZE; i++) {
			WREG32(RLC_UCODE_ADDR, i);
			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
		}
	} else if (rdev->family >= CHIP_CEDAR) {
		for (i = 0; i < EVERGREEN_RLC_UCODE_SIZE; i++) {
			WREG32(RLC_UCODE_ADDR, i);
			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
		}
	} else if (rdev->family >= CHIP_RV770) {
		for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
			WREG32(RLC_UCODE_ADDR, i);
			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
		}
	} else {
		for (i = 0; i < RLC_UCODE_SIZE; i++) {
			WREG32(RLC_UCODE_ADDR, i);
			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
		}
	}
	WREG32(RLC_UCODE_ADDR, 0);

	r600_rlc_start(rdev);

	return 0;
}

static void r600_enable_interrupts(struct radeon_device *rdev)
{
	u32 ih_cntl = RREG32(IH_CNTL);
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);

	ih_cntl |= ENABLE_INTR;
	ih_rb_cntl |= IH_RB_ENABLE;
	WREG32(IH_CNTL, ih_cntl);
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	rdev->ih.enabled = true;
}

void r600_disable_interrupts(struct radeon_device *rdev)
{
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
	u32 ih_cntl = RREG32(IH_CNTL);

	ih_rb_cntl &= ~IH_RB_ENABLE;
	ih_cntl &= ~ENABLE_INTR;
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	WREG32(IH_CNTL, ih_cntl);
	/* set rptr, wptr to 0 */
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);
	rdev->ih.enabled = false;
	rdev->ih.wptr = 0;
	rdev->ih.rptr = 0;
}

static void r600_disable_interrupt_state(struct radeon_device *rdev)
{
	u32 tmp;

	WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
	WREG32(GRBM_INT_CNTL, 0);
	WREG32(DxMODE_INT_MASK, 0);
	WREG32(D1GRPH_INTERRUPT_CONTROL, 0);
	WREG32(D2GRPH_INTERRUPT_CONTROL, 0);
	if (ASIC_IS_DCE3(rdev)) {
		WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0);
		WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0);
		tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD1_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD2_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD3_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
		if (ASIC_IS_DCE32(rdev)) {
			tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
			tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
		}
	} else {
		WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
		WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
		tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
		tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
		tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
	}
}

int r600_irq_init(struct radeon_device *rdev)
{
	int ret = 0;
	int rb_bufsz;
	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;

	/* allocate ring */
	ret = r600_ih_ring_alloc(rdev);
	if (ret)
		return ret;

	/* disable irqs */
	r600_disable_interrupts(rdev);

	/* init rlc */
	ret = r600_rlc_init(rdev);
	if (ret) {
		r600_ih_ring_fini(rdev);
		return ret;
	}

	/* setup interrupt control */
	/* set dummy read address to ring address */
	WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
	interrupt_cntl = RREG32(INTERRUPT_CNTL);
	/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
	 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
	 */
	interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
	/* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
	interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
	WREG32(INTERRUPT_CNTL, interrupt_cntl);

	WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
	rb_bufsz = drm_order(rdev->ih.ring_size / 4);

	ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
		      IH_WPTR_OVERFLOW_CLEAR |
		      (rb_bufsz << 1));

	if (rdev->wb.enabled)
		ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;

	/* set the writeback address whether it's enabled or not */
	WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);

	WREG32(IH_RB_CNTL, ih_rb_cntl);

	/* set rptr, wptr to 0 */
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);

	/* Default settings for IH_CNTL (disabled at first) */
	ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10);
	/* RPTR_REARM only works if msi's are enabled */
	if (rdev->msi_enabled)
		ih_cntl |= RPTR_REARM;

#ifdef __BIG_ENDIAN
	ih_cntl |= IH_MC_SWAP(IH_MC_SWAP_32BIT);
#endif
	WREG32(IH_CNTL, ih_cntl);

	/* force the active interrupt state to all disabled */
	if (rdev->family >= CHIP_CEDAR)
		evergreen_disable_interrupt_state(rdev);
	else
		r600_disable_interrupt_state(rdev);

	/* enable irqs */
	r600_enable_interrupts(rdev);

	return ret;
}

int r600_irq_set(struct radeon_device *rdev)
{
	u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
	u32 mode_int = 0;
	u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
	u32 grbm_int_cntl = 0;
	u32 hdmi1, hdmi2;
	u32 d1grph = 0, d2grph = 0;

	ENTER();

	if (!rdev->irq.installed) {
		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
		return -EINVAL;
	}
	/* don't enable anything if the ih is disabled */
	if (!rdev->ih.enabled) {
		r600_disable_interrupts(rdev);
		/* force the active interrupt state to all disabled */
		r600_disable_interrupt_state(rdev);
		return 0;
	}

	hdmi1 = RREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
	if (ASIC_IS_DCE3(rdev)) {
		hdmi2 = RREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
		hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
		if (ASIC_IS_DCE32(rdev)) {
			hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
			hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
		}
	} else {
		hdmi2 = RREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
		hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
	}

	if (rdev->irq.sw_int) {
		DRM_DEBUG("r600_irq_set: sw int\n");
		cp_int_cntl |= RB_INT_ENABLE;
		cp_int_cntl |= TIME_STAMP_INT_ENABLE;
	}
	if (rdev->irq.crtc_vblank_int[0] ||
	    rdev->irq.pflip[0]) {
		DRM_DEBUG("r600_irq_set: vblank 0\n");
		mode_int |= D1MODE_VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[1] ||
	    rdev->irq.pflip[1]) {
		DRM_DEBUG("r600_irq_set: vblank 1\n");
		mode_int |= D2MODE_VBLANK_INT_MASK;
	}
	if (rdev->irq.hpd[0]) {
		DRM_DEBUG("r600_irq_set: hpd 1\n");
		hpd1 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[1]) {
		DRM_DEBUG("r600_irq_set: hpd 2\n");
		hpd2 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[2]) {
		DRM_DEBUG("r600_irq_set: hpd 3\n");
		hpd3 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[3]) {
		DRM_DEBUG("r600_irq_set: hpd 4\n");
		hpd4 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[4]) {
		DRM_DEBUG("r600_irq_set: hpd 5\n");
		hpd5 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[5]) {
		DRM_DEBUG("r600_irq_set: hpd 6\n");
		hpd6 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hdmi[0]) {
		DRM_DEBUG("r600_irq_set: hdmi 1\n");
		hdmi1 |= R600_HDMI_INT_EN;
	}
	if (rdev->irq.hdmi[1]) {
		DRM_DEBUG("r600_irq_set: hdmi 2\n");
		hdmi2 |= R600_HDMI_INT_EN;
	}
	if (rdev->irq.gui_idle) {
		DRM_DEBUG("gui idle\n");
		grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
	}

	WREG32(CP_INT_CNTL, cp_int_cntl);
	WREG32(DxMODE_INT_MASK, mode_int);
	WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph);
	WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph);
	WREG32(GRBM_INT_CNTL, grbm_int_cntl);
	WREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, hdmi1);
	if (ASIC_IS_DCE3(rdev)) {
		WREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, hdmi2);
		WREG32(DC_HPD1_INT_CONTROL, hpd1);
		WREG32(DC_HPD2_INT_CONTROL, hpd2);
		WREG32(DC_HPD3_INT_CONTROL, hpd3);
		WREG32(DC_HPD4_INT_CONTROL, hpd4);
		if (ASIC_IS_DCE32(rdev)) {
			WREG32(DC_HPD5_INT_CONTROL, hpd5);
			WREG32(DC_HPD6_INT_CONTROL, hpd6);
		}
	} else {
		WREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, hdmi2);
		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
	}

	LEAVE();

	return 0;
}

static inline void r600_irq_ack(struct radeon_device *rdev)
{
	u32 tmp;

	if (ASIC_IS_DCE3(rdev)) {
		rdev->irq.stat_regs.r600.disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
		rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
		rdev->irq.stat_regs.r600.disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
	} else {
		rdev->irq.stat_regs.r600.disp_int = RREG32(DISP_INTERRUPT_STATUS);
		rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
		rdev->irq.stat_regs.r600.disp_int_cont2 = 0;
	}
	rdev->irq.stat_regs.r600.d1grph_int = RREG32(D1GRPH_INTERRUPT_STATUS);
	rdev->irq.stat_regs.r600.d2grph_int = RREG32(D2GRPH_INTERRUPT_STATUS);

	if (rdev->irq.stat_regs.r600.d1grph_int & DxGRPH_PFLIP_INT_OCCURRED)
		WREG32(D1GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
	if (rdev->irq.stat_regs.r600.d2grph_int & DxGRPH_PFLIP_INT_OCCURRED)
		WREG32(D2GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
	if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT)
		WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
	if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT)
		WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
	if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT)
		WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
	if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT)
		WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
	if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD1_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD1_INT_CONTROL, tmp);
		} else {
			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
		}
	}
	if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD2_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD2_INT_CONTROL, tmp);
		} else {
			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
		}
	}
	if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD3_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD3_INT_CONTROL, tmp);
		} else {
			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
		}
	}
	if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
		tmp = RREG32(DC_HPD4_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
	}
	if (ASIC_IS_DCE32(rdev)) {
		if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
			tmp = RREG32(DC_HPD5_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
		}
		if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
			tmp = RREG32(DC_HPD6_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
		}
	}
	if (RREG32(R600_HDMI_BLOCK1 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
		WREG32_P(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
	}
	if (ASIC_IS_DCE3(rdev)) {
		if (RREG32(R600_HDMI_BLOCK3 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
			WREG32_P(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
		}
	} else {
		if (RREG32(R600_HDMI_BLOCK2 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
			WREG32_P(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
		}
	}
}

static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
{
	u32 wptr, tmp;

	if (rdev->wb.enabled)
		wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
	else
		wptr = RREG32(IH_RB_WPTR);

	if (wptr & RB_OVERFLOW) {
		/* When a ring buffer overflow happens, start parsing interrupts
		 * from the last vector that was not overwritten (wptr + 16).
		 * Hopefully this allows us to catch up.
		 */
		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
			wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask);
		rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
		tmp = RREG32(IH_RB_CNTL);
		tmp |= IH_WPTR_OVERFLOW_CLEAR;
		WREG32(IH_RB_CNTL, tmp);
	}
	return (wptr & rdev->ih.ptr_mask);
}

/*        r600 IV Ring
 * Each IV ring entry is 128 bits:
 * [7:0]    - interrupt source id
 * [31:8]   - reserved
 * [59:32]  - interrupt source data
 * [127:60]  - reserved
 *
 * The basic interrupt vector entries
 * are decoded as follows:
 * src_id  src_data  description
 *      1         0  D1 Vblank
 *      1         1  D1 Vline
 *      5         0  D2 Vblank
 *      5         1  D2 Vline
 *     19         0  FP Hot plug detection A
 *     19         1  FP Hot plug detection B
 *     19         2  DAC A auto-detection
 *     19         3  DAC B auto-detection
 *     21         4  HDMI block A
 *     21         5  HDMI block B
 *    176         -  CP_INT RB
 *    177         -  CP_INT IB1
 *    178         -  CP_INT IB2
 *    181         -  EOP Interrupt
 *    233         -  GUI Idle
 *
 * Note, these are based on r600 and may need to be
 * adjusted or added to on newer asics
 */
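
/* For example, a D1 vblank arrives as src_id 1 with src_data 0, and a
 * hotplug event on HPD1 as src_id 19 with src_data 0; the nested switch
 * in r600_irq_process() below routes each pair to its handler.
 */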

int r600_irq_process(struct radeon_device *rdev)
{
	u32 wptr;
	u32 rptr;
	u32 src_id, src_data;
	u32 ring_index;
	unsigned long flags;
	bool queue_hotplug = false;

	if (!rdev->ih.enabled || rdev->shutdown)
		return IRQ_NONE;

	wptr = r600_get_ih_wptr(rdev);
	rptr = rdev->ih.rptr;
	DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);

	spin_lock_irqsave(&rdev->ih.lock, flags);

	if (rptr == wptr) {
		spin_unlock_irqrestore(&rdev->ih.lock, flags);
		return IRQ_NONE;
	}

restart_ih:
	/* display interrupts */
	r600_irq_ack(rdev);

	rdev->ih.wptr = wptr;
	while (rptr != wptr) {
		/* wptr/rptr are in bytes! */
		ring_index = rptr / 4;
		src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
		src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;

		switch (src_id) {
		case 1: /* D1 vblank/vline */
			switch (src_data) {
			case 0: /* D1 vblank */
				if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[0]) {
//						drm_handle_vblank(rdev->ddev, 0);
						rdev->pm.vblank_sync = true;
//						wake_up(&rdev->irq.vblank_queue);
					}
//					if (rdev->irq.pflip[0])
//						radeon_crtc_handle_flip(rdev, 0);
					rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D1 vblank\n");
				}
				break;
			case 1: /* D1 vline */
				if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D1 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 5: /* D2 vblank/vline */
			switch (src_data) {
			case 0: /* D2 vblank */
				if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[1]) {
//						drm_handle_vblank(rdev->ddev, 1);
						rdev->pm.vblank_sync = true;
//						wake_up(&rdev->irq.vblank_queue);
					}
//					if (rdev->irq.pflip[1])
//						radeon_crtc_handle_flip(rdev, 1);
					rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D2 vblank\n");
				}
				break;
			case 1: /* D2 vline */
				if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D2 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 19: /* HPD/DAC hotplug */
			switch (src_data) {
			case 0:
				if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD1\n");
				}
				break;
			case 1:
				if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD2\n");
				}
				break;
			case 4:
				if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD3\n");
				}
				break;
			case 5:
				if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD4\n");
				}
				break;
			case 10:
				if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD5\n");
				}
				break;
			case 12:
				if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD6\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 21: /* HDMI */
			DRM_DEBUG("IH: HDMI: 0x%x\n", src_data);
//			r600_audio_schedule_polling(rdev);
			break;
		case 176: /* CP_INT in ring buffer */
		case 177: /* CP_INT in IB1 */
		case 178: /* CP_INT in IB2 */
			DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
//			radeon_fence_process(rdev);
			break;
		case 181: /* CP EOP event */
			DRM_DEBUG("IH: CP EOP\n");
//			radeon_fence_process(rdev);
			break;
		case 233: /* GUI IDLE */
			DRM_DEBUG("IH: GUI idle\n");
			rdev->pm.gui_idle = true;
//			wake_up(&rdev->irq.idle_queue);
			break;
		default:
			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
			break;
		}

		/* wptr/rptr are in bytes! */
		rptr += 16;
		rptr &= rdev->ih.ptr_mask;
	}
	/* make sure wptr hasn't changed while processing */
	wptr = r600_get_ih_wptr(rdev);
	if (wptr != rdev->ih.wptr)
		goto restart_ih;
//	if (queue_hotplug)
//		schedule_work(&rdev->hotplug_work);
	rdev->ih.rptr = rptr;
	WREG32(IH_RB_RPTR, rdev->ih.rptr);
	spin_unlock_irqrestore(&rdev->ih.lock, flags);
	return IRQ_HANDLED;
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int r600_debugfs_cp_ring_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	unsigned count, i, j;

	radeon_ring_free_size(rdev);
	count = (rdev->cp.ring_size / 4) - rdev->cp.ring_free_dw;
	seq_printf(m, "CP_STAT 0x%08x\n", RREG32(CP_STAT));
	seq_printf(m, "CP_RB_WPTR 0x%08x\n", RREG32(CP_RB_WPTR));
	seq_printf(m, "CP_RB_RPTR 0x%08x\n", RREG32(CP_RB_RPTR));
	seq_printf(m, "driver's copy of the CP_RB_WPTR 0x%08x\n", rdev->cp.wptr);
	seq_printf(m, "driver's copy of the CP_RB_RPTR 0x%08x\n", rdev->cp.rptr);
	seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
	seq_printf(m, "%u dwords in ring\n", count);
	i = rdev->cp.rptr;
	for (j = 0; j <= count; j++) {
		seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
		i = (i + 1) & rdev->cp.ptr_mask;
	}
	return 0;
}

static int r600_debugfs_mc_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	DREG32_SYS(m, rdev, R_000E50_SRBM_STATUS);
	DREG32_SYS(m, rdev, VM_L2_STATUS);
	return 0;
}

static struct drm_info_list r600_mc_info_list[] = {
	{"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
	{"r600_ring_info", r600_debugfs_cp_ring_info, 0, NULL},
};
#endif

int r600_debugfs_mc_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, r600_mc_info_list, ARRAY_SIZE(r600_mc_info_list));
#else
	return 0;
#endif
}

2145
/**
2915
/**
2146
 * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl
2916
 * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl
2147
 * rdev: radeon device structure
2917
 * rdev: radeon device structure
2148
 * bo: buffer object struct which userspace is waiting for idle
2918
 * bo: buffer object struct which userspace is waiting for idle
2149
 *
2919
 *
2150
 * Some R6XX/R7XX doesn't seems to take into account HDP flush performed
2920
 * Some R6XX/R7XX doesn't seems to take into account HDP flush performed
2151
 * through ring buffer, this leads to corruption in rendering, see
2921
 * through ring buffer, this leads to corruption in rendering, see
2152
 * http://bugzilla.kernel.org/show_bug.cgi?id=15186 to avoid this we
2922
 * http://bugzilla.kernel.org/show_bug.cgi?id=15186 to avoid this we
2153
 * directly perform HDP flush by writing register through MMIO.
2923
 * directly perform HDP flush by writing register through MMIO.
2154
 */
2924
 */
void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
{
	/* r7xx hw bug.  Write to HDP_DEBUG1 followed by an fb read
	 * rather than a write to HDP_REG_COHERENCY_FLUSH_CNTL.
	 * This seems to cause problems on some AGP cards. Just use the old
	 * method for them.
	 */
	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
	    rdev->vram_scratch.ptr && !(rdev->flags & RADEON_IS_AGP)) {
		void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
		u32 tmp;

		WREG32(HDP_DEBUG1, 0);
		tmp = readl(ptr);
	} else {
		WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
	}
}
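
/* In the RV770..RV740 branch above, the readl() of the VRAM scratch
 * page is not dead code even though the result is discarded: the
 * HDP_DEBUG1 write followed by a framebuffer read-back is what appears
 * to force the host data path to flush, per the workaround described
 * in the comment.
 */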
void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes)
{
	u32 link_width_cntl, mask, target_reg;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return;

	/* FIXME wait for idle */

	switch (lanes) {
	case 0:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
		break;
	case 1:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
		break;
	case 2:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
		break;
	case 4:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
		break;
	case 8:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
		break;
	case 12:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
		break;
	case 16:
	default:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
		break;
	}
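
	/* The LC_LINK_WIDTH_Xn values are register field encodings, not
	 * raw lane counts (the field is only a few bits wide, so 16
	 * would not even fit), which is why the explicit switch table
	 * above is needed instead of writing the lane count directly.
	 */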

	link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);

	if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) ==
	    (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT))
		return;

	if (link_width_cntl & R600_PCIE_LC_UPCONFIGURE_DIS)
		return;

	link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK |
			     RADEON_PCIE_LC_RECONFIG_NOW |
			     R600_PCIE_LC_RENEGOTIATE_EN |
			     R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE);
	link_width_cntl |= mask;

	WREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);

	/* some northbridges can renegotiate the link rather than requiring
	 * a complete re-config.
	 * e.g., AMD 780/790 northbridges (pci ids: 0x5956, 0x5957, 0x5958, etc.)
	 */
	if (link_width_cntl & R600_PCIE_LC_RENEGOTIATION_SUPPORT)
		link_width_cntl |= R600_PCIE_LC_RENEGOTIATE_EN | R600_PCIE_LC_UPCONFIGURE_SUPPORT;
	else
		link_width_cntl |= R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE;

	WREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl |
						       RADEON_PCIE_LC_RECONFIG_NOW));

	if (rdev->family >= CHIP_RV770)
		target_reg = R700_TARGET_AND_CURRENT_PROFILE_INDEX;
	else
		target_reg = R600_TARGET_AND_CURRENT_PROFILE_INDEX;

	/* wait for lane set to complete */
	link_width_cntl = RREG32(target_reg);
	while (link_width_cntl == 0xffffffff)
		link_width_cntl = RREG32(target_reg);
}
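
/* A minimal usage sketch (hypothetical caller, not from this file):
 *
 *   r600_set_pcie_lanes(rdev, 8);   // request an x8 link
 *
 * On IGP, non-PCIE and X2 boards the call is a no-op; otherwise the
 * function itself busy-waits on the profile-index register until the
 * lane change has taken effect.
 */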
int r600_get_pcie_lanes(struct radeon_device *rdev)
{
	u32 link_width_cntl;

	if (rdev->flags & RADEON_IS_IGP)
		return 0;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return 0;

	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return 0;

	/* FIXME wait for idle */

	link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);

	switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
	case RADEON_PCIE_LC_LINK_WIDTH_X0:
		return 0;
	case RADEON_PCIE_LC_LINK_WIDTH_X1:
		return 1;
	case RADEON_PCIE_LC_LINK_WIDTH_X2:
		return 2;
	case RADEON_PCIE_LC_LINK_WIDTH_X4:
		return 4;
	case RADEON_PCIE_LC_LINK_WIDTH_X8:
		return 8;
	case RADEON_PCIE_LC_LINK_WIDTH_X16:
	default:
		return 16;
	}
}
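
/* The RD field decoded above uses the same LC_LINK_WIDTH_Xn encodings
 * that r600_set_pcie_lanes() programs, so once a retrain has finished,
 * a round trip such as set(4) followed by get() is expected to
 * return 4.
 */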
static void r600_pcie_gen2_enable(struct radeon_device *rdev)
{
	u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp;
	u16 link_cntl2;

	if (radeon_pcie_gen2 == 0)
		return;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return;

	/* only RV6xx+ chips are supported */
	if (rdev->family <= CHIP_R600)
		return;

	/* 55 nm r6xx asics */
	if ((rdev->family == CHIP_RV670) ||
	    (rdev->family == CHIP_RV620) ||
	    (rdev->family == CHIP_RV635)) {
		/* advertise upconfig capability */
		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
		link_width_cntl &= ~LC_UPCONFIGURE_DIS;
		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
		if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) {
			lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
			link_width_cntl &= ~(LC_LINK_WIDTH_MASK |
					     LC_RECONFIG_ARC_MISSING_ESCAPE);
			link_width_cntl |= lanes | LC_RECONFIG_NOW | LC_RENEGOTIATE_EN;
			WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
		} else {
			link_width_cntl |= LC_UPCONFIGURE_DIS;
			WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
		}
	}
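
	/* At this point the 55 nm parts have either renegotiated their
	 * current width with upconfig allowed, or, if the bridge cannot
	 * renegotiate, have upconfig explicitly disabled again.
	 */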

	speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
	if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
	    (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {

		/* 55 nm r6xx asics */
		if ((rdev->family == CHIP_RV670) ||
		    (rdev->family == CHIP_RV620) ||
		    (rdev->family == CHIP_RV635)) {
			WREG32(MM_CFGREGS_CNTL, 0x8);
			link_cntl2 = RREG32(0x4088);
			WREG32(MM_CFGREGS_CNTL, 0);
			/* not supported yet */
			if (link_cntl2 & SELECTABLE_DEEMPHASIS)
				return;
		}

		speed_cntl &= ~LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK;
		speed_cntl |= (0x3 << LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT);
		speed_cntl &= ~LC_VOLTAGE_TIMER_SEL_MASK;
		speed_cntl &= ~LC_FORCE_DIS_HW_SPEED_CHANGE;
		speed_cntl |= LC_FORCE_EN_HW_SPEED_CHANGE;
		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);

		tmp = RREG32(0x541c);
		WREG32(0x541c, tmp | 0x8);
		WREG32(MM_CFGREGS_CNTL, MM_WR_TO_CFG_EN);
		link_cntl2 = RREG16(0x4088);
		link_cntl2 &= ~TARGET_LINK_SPEED_MASK;
		link_cntl2 |= 0x2;
		WREG16(0x4088, link_cntl2);
		WREG32(MM_CFGREGS_CNTL, 0);

		if ((rdev->family == CHIP_RV670) ||
		    (rdev->family == CHIP_RV620) ||
		    (rdev->family == CHIP_RV635)) {
			training_cntl = RREG32_PCIE_P(PCIE_LC_TRAINING_CNTL);
			training_cntl &= ~LC_POINT_7_PLUS_EN;
			WREG32_PCIE_P(PCIE_LC_TRAINING_CNTL, training_cntl);
		} else {
			speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
			speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
			WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
		}

		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
		speed_cntl |= LC_GEN2_EN_STRAP;
		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);

	} else {
		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
		/* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
		if (1)
			link_width_cntl |= LC_UPCONFIGURE_DIS;
		else
			link_width_cntl &= ~LC_UPCONFIGURE_DIS;
		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
	}
}
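
/* Rough shape of the gen2 bring-up above, for reference:
 *   1. bail out early on IGP, non-PCIE, X2 and pre-RV6xx parts;
 *   2. on the 55 nm parts (RV670/RV620/RV635), advertise upconfig
 *      capability first;
 *   3. if the link partner ever advertised gen2, allow a few HW speed
 *      change attempts, force a HW speed change, set the target link
 *      speed to 5.0 GT/s (the 0x2 written into the config-space window
 *      at 0x4088) and finally set LC_GEN2_EN_STRAP;
 *   4. otherwise leave upconfig disabled (see the XXX note above).
 */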