Subversion Repositories: Kolibri OS

Diff of Rev 1404 against Rev 1413 (the two revisions do not differ in the
portion shown below).
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
/* The two bracketed header names below were eaten by the HTML export of this
 * diff; <linux/seq_file.h> and <linux/firmware.h> are restored here as an
 * assumption, these being the headers the upstream Linux r600.c pulls in for
 * the debugfs and firmware-loading code used in this file. */
#include <linux/seq_file.h>
#include <linux/firmware.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon.h"
#include "radeon_mode.h"
#include "r600d.h"
#include "atom.h"
#include "avivod.h"

#define PFP_UCODE_SIZE 576
#define PM4_UCODE_SIZE 1792
#define RLC_UCODE_SIZE 768
#define R700_PFP_UCODE_SIZE 848
#define R700_PM4_UCODE_SIZE 1360
#define R700_RLC_UCODE_SIZE 1024

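/* Sizes of the microcode images loaded into the PFP (prefetch parser),
 * PM4/ME (micro engine) and RLC blocks, presumably in 32-bit dwords as in
 * the upstream driver, with separate values for the R700 family. */
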
/* Firmware Names */
MODULE_FIRMWARE("radeon/R600_pfp.bin");
MODULE_FIRMWARE("radeon/R600_me.bin");
MODULE_FIRMWARE("radeon/RV610_pfp.bin");
MODULE_FIRMWARE("radeon/RV610_me.bin");
MODULE_FIRMWARE("radeon/RV630_pfp.bin");
MODULE_FIRMWARE("radeon/RV630_me.bin");
MODULE_FIRMWARE("radeon/RV620_pfp.bin");
MODULE_FIRMWARE("radeon/RV620_me.bin");
MODULE_FIRMWARE("radeon/RV635_pfp.bin");
MODULE_FIRMWARE("radeon/RV635_me.bin");
MODULE_FIRMWARE("radeon/RV670_pfp.bin");
MODULE_FIRMWARE("radeon/RV670_me.bin");
MODULE_FIRMWARE("radeon/RS780_pfp.bin");
MODULE_FIRMWARE("radeon/RS780_me.bin");
MODULE_FIRMWARE("radeon/RV770_pfp.bin");
MODULE_FIRMWARE("radeon/RV770_me.bin");
MODULE_FIRMWARE("radeon/RV730_pfp.bin");
MODULE_FIRMWARE("radeon/RV730_me.bin");
MODULE_FIRMWARE("radeon/RV710_pfp.bin");
MODULE_FIRMWARE("radeon/RV710_me.bin");
MODULE_FIRMWARE("radeon/R600_rlc.bin");
MODULE_FIRMWARE("radeon/R700_rlc.bin");

int r600_debugfs_mc_info_init(struct radeon_device *rdev);

/* r600,rv610,rv630,rv620,rv635,rv670 */
int r600_mc_wait_for_idle(struct radeon_device *rdev);
void r600_gpu_init(struct radeon_device *rdev);
void r600_fini(struct radeon_device *rdev);

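/* Hot-plug detect (HPD) support. DCE3 parts expose up to six DC_HPDx pads
 * (the fifth and sixth are flagged as DCE 3.2 in the code below), while
 * pre-DCE3 R6xx chips have three legacy DC_HOT_PLUG_DETECTx pads; every
 * helper that follows switches on the ASIC type accordingly. */
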
/* hpd for digital panel detect/disconnect */
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
	bool connected = false;

	if (ASIC_IS_DCE3(rdev)) {
		switch (hpd) {
		case RADEON_HPD_1:
			if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_2:
			if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_3:
			if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_4:
			if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
			/* DCE 3.2 */
		case RADEON_HPD_5:
			if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_6:
			if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		default:
			break;
		}
	} else {
		switch (hpd) {
		case RADEON_HPD_1:
			if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_2:
			if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_3:
			if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		default:
			break;
		}
	}
	return connected;
}

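/* Invert a pad's interrupt polarity to match its current sense state, so the
 * next hotplug interrupt fires when the connection state changes rather than
 * immediately re-asserting. */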
void r600_hpd_set_polarity(struct radeon_device *rdev,
			   enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = r600_hpd_sense(rdev, hpd);

	if (ASIC_IS_DCE3(rdev)) {
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HPD1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HPD2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HPD3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD3_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_4:
			tmp = RREG32(DC_HPD4_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD4_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_5:
			tmp = RREG32(DC_HPD5_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
			break;
			/* DCE 3.2 */
		case RADEON_HPD_6:
			tmp = RREG32(DC_HPD6_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	} else {
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	}
}

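/* Per-connector HPD pad setup. On DCE3 each pad gets a connection timer
 * (0x9c4) and an RX interrupt timer (0xfa), plus the enable bit on DCE 3.2;
 * pre-DCE3 pads just get DC_HOT_PLUG_DETECTx_EN. The rdev->irq.hpd[]
 * bookkeeping and the closing r600_irq_set() call are commented out in this
 * KolibriOS port, where hotplug interrupts are not wired up. */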
void r600_hpd_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	if (ASIC_IS_DCE3(rdev)) {
		u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
		if (ASIC_IS_DCE32(rdev))
			tmp |= DC_HPDx_EN;

		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, tmp);
//               rdev->irq.hpd[0] = true;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, tmp);
//               rdev->irq.hpd[1] = true;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, tmp);
//               rdev->irq.hpd[2] = true;
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, tmp);
//               rdev->irq.hpd[3] = true;
				break;
				/* DCE 3.2 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, tmp);
//               rdev->irq.hpd[4] = true;
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, tmp);
//               rdev->irq.hpd[5] = true;
				break;
			default:
				break;
			}
		}
	} else {
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
//               rdev->irq.hpd[0] = true;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
//               rdev->irq.hpd[1] = true;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
//               rdev->irq.hpd[2] = true;
				break;
			default:
				break;
			}
		}
	}
//   if (rdev->irq.installed)
//   r600_irq_set(rdev);
}

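/* Teardown mirror of r600_hpd_init(): writing 0 to a pad's control register
 * disables it. */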
void r600_hpd_fini(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	if (ASIC_IS_DCE3(rdev)) {
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, 0);
//               rdev->irq.hpd[0] = false;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, 0);
//               rdev->irq.hpd[1] = false;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, 0);
//               rdev->irq.hpd[2] = false;
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, 0);
//               rdev->irq.hpd[3] = false;
				break;
				/* DCE 3.2 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, 0);
//               rdev->irq.hpd[4] = false;
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, 0);
//               rdev->irq.hpd[5] = false;
				break;
			default:
				break;
			}
		}
	} else {
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
//               rdev->irq.hpd[0] = false;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
//               rdev->irq.hpd[1] = false;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
//               rdev->irq.hpd[2] = false;
				break;
			default:
				break;
			}
		}
	}
}

/*
 * R600 PCIE GART
 */
int r600_gart_clear_page(struct radeon_device *rdev, int i)
{
	void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
	u64 pte;

	if (i < 0 || i >= rdev->gart.num_gpu_pages)
		return -EINVAL;
	pte = 0;
	writeq(pte, ((void __iomem *)ptr) + (i * 8));
	return 0;
}

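/* Flush the GART TLB: post an invalidation request (REQUEST_TYPE(1)) for the
 * whole GTT page range on VM context 0, then poll the response field; a
 * value of 2 signals failure, any other non-zero value completion. */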
void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read the invalidation request's response */
		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
		if (tmp == 2) {
			printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
			return;
		}
		if (tmp) {
			return;
		}
		udelay(1);
	}
}

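/* Each GPU page is mapped by one 8-byte PTE, so the page table occupies
 * num_gpu_pages * 8 bytes and is allocated as a buffer object in VRAM. */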
int r600_pcie_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.table.vram.robj) {
		WARN(1, "R600 PCIE GART already initialized.\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r)
		return r;
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
	return radeon_gart_table_vram_alloc(rdev);
}

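/* Bringing the GART online: pin the table in VRAM, program the VM L2 cache
 * and every client's L1 TLB, point VM context 0 at the GTT range and table
 * base, route protection faults to the dummy page, disable the remaining
 * contexts, then flush the TLB before marking the GART ready. */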
int r600_pcie_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r, i;

	if (rdev->gart.table.vram.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(rdev->dummy_page.addr >> 12));
	for (i = 1; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	r600_pcie_gart_tlb_flush(rdev);
	rdev->gart.ready = true;
	return 0;
}

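/* The reverse of the above: disable all VM contexts, drop the L2 cache and
 * L1 TLB enable bits (keeping the queue-size fields), and unmap/unpin the
 * page-table object if it exists. */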
void r600_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;
	int i, r;

	/* Disable all tables */
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	/* Disable L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup L1 TLB control */
	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	if (rdev->gart.table.vram.robj) {
		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(rdev->gart.table.vram.robj);
			radeon_bo_unpin(rdev->gart.table.vram.robj);
			radeon_bo_unreserve(rdev->gart.table.vram.robj);
		}
	}
}

void r600_pcie_gart_fini(struct radeon_device *rdev)
{
	r600_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
	radeon_gart_fini(rdev);
}

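/* r600_agp_enable() performs the same L2/L1 TLB programming as the PCIE GART
 * path but leaves all seven VM contexts disabled: with AGP the aperture is
 * translated outside the GPU, so the GPU-side page table is not used. */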
void r600_agp_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
}

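/* Poll SRBM_STATUS, masking 0x3F00 (the memory-controller busy bits), once
 * per microsecond for up to rdev->usec_timeout tries; returns 0 once the MC
 * goes idle, -1 on timeout. */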
int r600_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(R_000E50_SRBM_STATUS) & 0x3F00;
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -1;
}

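/* Reprogramming the framebuffer location is only safe while the memory
 * controller has no requests in flight, so the sequence below stops display
 * MC accesses (rv515_mc_stop), waits for idle, rewrites the aperture and
 * FB/AGP location registers, then resumes and disables the VGA renderer so
 * it cannot scribble over VRAM the driver is about to own. */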
static void r600_mc_program(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}
	/* Lock out access through the VGA aperture (doesn't exist before R600) */
	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	if (rdev->flags & RADEON_IS_AGP) {
		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
			/* VRAM before AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.vram_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.gtt_end >> 12);
		} else {
			/* VRAM after AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.gtt_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.vram_end >> 12);
		}
	} else {
		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
	}
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
	WREG32(HDP_NONSURFACE_SIZE, rdev->mc.mc_vram_size | 0x3FF);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
	} else {
		WREG32(MC_VM_AGP_BASE, 0);
		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	}
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}
	rv515_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it overwriting our objects */
	rv515_vga_render_disable(rdev);
}

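/* r600_mc_init() derives the VRAM bus width from RAMCFG/CHMAP: the
 * per-channel width (16, 32 or 64 bits) times the channel count (1/2/4/8),
 * e.g. four 64-bit channels give a 256-bit bus. Note the unconditional
 * "rdev->mc.gtt_location = rdev->mc.mc_vram_size;" closing the non-AGP
 * branch, which overrides the placement logic above it and pins the GTT
 * directly after VRAM; this looks like a deliberate simplification in this
 * port. */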
int r600_mc_init(struct radeon_device *rdev)
{
	fixed20_12 a;
	u32 tmp;
	int chansize, numchan;

	/* Get VRAM information */
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32(RAMCFG);
	if (tmp & CHANSIZE_OVERRIDE) {
		chansize = 16;
	} else if (tmp & CHANSIZE_MASK) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	tmp = RREG32(CHMAP);
	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	}
	rdev->mc.vram_width = numchan * chansize;
	/* Could aper size report 0 ? */
	rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
	rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
	/* Setup GPU memory space */
	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);

	if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
		rdev->mc.mc_vram_size = rdev->mc.aper_size;

	if (rdev->mc.real_vram_size > rdev->mc.aper_size)
		rdev->mc.real_vram_size = rdev->mc.aper_size;

	if (rdev->flags & RADEON_IS_AGP) {
		/* gtt_size is setup by radeon_agp_init */
		rdev->mc.gtt_location = rdev->mc.agp_base;
		tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size;
		/* Try to put vram before or after AGP because we
		 * want SYSTEM_APERTURE to cover both VRAM and
		 * AGP so that GPU can catch out of VRAM/AGP access
		 */
		if (rdev->mc.gtt_location > rdev->mc.mc_vram_size) {
			/* Enough room before */
			rdev->mc.vram_location = rdev->mc.gtt_location -
							rdev->mc.mc_vram_size;
		} else if (tmp > rdev->mc.mc_vram_size) {
			/* Enough room after */
			rdev->mc.vram_location = rdev->mc.gtt_location +
							rdev->mc.gtt_size;
		} else {
			/* Try setting up VRAM first; AGP might not
			 * work on some cards
			 */
			rdev->mc.vram_location = 0x00000000UL;
			rdev->mc.gtt_location = rdev->mc.mc_vram_size;
		}
	} else {
		rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
		rdev->mc.vram_location = (RREG32(MC_VM_FB_LOCATION) &
							0xFFFF) << 24;
		tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size;
		if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) {
			/* Enough room after vram */
			rdev->mc.gtt_location = tmp;
		} else if (rdev->mc.vram_location >= rdev->mc.gtt_size) {
			/* Enough room before vram */
			rdev->mc.gtt_location = 0;
		} else {
			/* Not enough room on either side; shrink
			 * gart size
			 */
			if (rdev->mc.vram_location > (0xFFFFFFFFUL - tmp)) {
				rdev->mc.gtt_location = 0;
				rdev->mc.gtt_size = rdev->mc.vram_location;
			} else {
				rdev->mc.gtt_location = tmp;
				rdev->mc.gtt_size = 0xFFFFFFFFUL - tmp;
			}
		}
		rdev->mc.gtt_location = rdev->mc.mc_vram_size;
	}
	rdev->mc.vram_start = rdev->mc.vram_location;
	rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
	rdev->mc.gtt_start = rdev->mc.gtt_location;
	rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
	/* FIXME: we should enforce default clock in case GPU is not in
	 * default setup
	 */
	a.full = rfixed_const(100);
	rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
	rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);

	if (rdev->flags & RADEON_IS_IGP)
		rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);

	return 0;
}

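/* Reset sequence: halt the CP, pulse GRBM_SOFT_RESET over the render blocks
 * if any report busy, always pulse the CP reset, then pulse SRBM_SOFT_RESET
 * (twice, with register readbacks to post the writes) for whatever system
 * blocks - RLC, GRBM, IH, VMC, MC, SEM, BIF - still look busy, and finally
 * re-run atom_asic_init() since the chip usually comes back incoherent. */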
/* We don't check that the GPU really needs a reset; we simply do the
 * reset, it's up to the caller to determine if the GPU needs one. We
 * might add a helper function to check that.
 */
int r600_gpu_soft_reset(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) |
				S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) |
				S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) |
				S_008010_SH_BUSY(1) | S_008010_SPI03_BUSY(1) |
				S_008010_SMX_BUSY(1) | S_008010_SC_BUSY(1) |
				S_008010_PA_BUSY(1) | S_008010_DB03_BUSY(1) |
				S_008010_CR_BUSY(1) | S_008010_CB03_BUSY(1) |
				S_008010_GUI_ACTIVE(1);
	u32 grbm2_busy_mask = S_008014_SPI0_BUSY(1) | S_008014_SPI1_BUSY(1) |
			S_008014_SPI2_BUSY(1) | S_008014_SPI3_BUSY(1) |
			S_008014_TA0_BUSY(1) | S_008014_TA1_BUSY(1) |
			S_008014_TA2_BUSY(1) | S_008014_TA3_BUSY(1) |
			S_008014_DB0_BUSY(1) | S_008014_DB1_BUSY(1) |
			S_008014_DB2_BUSY(1) | S_008014_DB3_BUSY(1) |
			S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) |
			S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
	u32 srbm_reset = 0;
	u32 tmp;

	dev_info(rdev->dev, "GPU softreset \n");
	dev_info(rdev->dev, "  R_008010_GRBM_STATUS=0x%08X\n",
		RREG32(R_008010_GRBM_STATUS));
	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2=0x%08X\n",
		RREG32(R_008014_GRBM_STATUS2));
	dev_info(rdev->dev, "  R_000E50_SRBM_STATUS=0x%08X\n",
		RREG32(R_000E50_SRBM_STATUS));
	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}
	/* Disable CP parsing/prefetching */
	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(0xff));
	/* Check if any of the rendering blocks are busy and reset them */
	if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
	    (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
		tmp = S_008020_SOFT_RESET_CR(1) |
			S_008020_SOFT_RESET_DB(1) |
			S_008020_SOFT_RESET_CB(1) |
			S_008020_SOFT_RESET_PA(1) |
			S_008020_SOFT_RESET_SC(1) |
			S_008020_SOFT_RESET_SMX(1) |
			S_008020_SOFT_RESET_SPI(1) |
			S_008020_SOFT_RESET_SX(1) |
			S_008020_SOFT_RESET_SH(1) |
			S_008020_SOFT_RESET_TC(1) |
			S_008020_SOFT_RESET_TA(1) |
			S_008020_SOFT_RESET_VC(1) |
			S_008020_SOFT_RESET_VGT(1);
		dev_info(rdev->dev, "  R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(R_008020_GRBM_SOFT_RESET, tmp);
		(void)RREG32(R_008020_GRBM_SOFT_RESET);
		udelay(50);
		WREG32(R_008020_GRBM_SOFT_RESET, 0);
		(void)RREG32(R_008020_GRBM_SOFT_RESET);
	}
	/* Reset CP (we always reset CP) */
	tmp = S_008020_SOFT_RESET_CP(1);
	dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
	WREG32(R_008020_GRBM_SOFT_RESET, tmp);
	(void)RREG32(R_008020_GRBM_SOFT_RESET);
	udelay(50);
	WREG32(R_008020_GRBM_SOFT_RESET, 0);
	(void)RREG32(R_008020_GRBM_SOFT_RESET);
	/* Reset other GPU blocks if necessary */
	if (G_000E50_RLC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_RLC(1);
	if (G_000E50_GRBM_RQ_PENDING(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_GRBM(1);
	if (G_000E50_HI_RQ_PENDING(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_IH(1);
	if (G_000E50_VMC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_VMC(1);
	if (G_000E50_MCB_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_MC(1);
	if (G_000E50_MCDZ_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_MC(1);
	if (G_000E50_MCDY_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_MC(1);
	if (G_000E50_MCDX_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_MC(1);
	if (G_000E50_MCDW_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_MC(1);
	if (G_000E50_RLC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_RLC(1);
	if (G_000E50_SEM_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_SEM(1);
	if (G_000E50_BIF_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_BIF(1);
	dev_info(rdev->dev, "  R_000E60_SRBM_SOFT_RESET=0x%08X\n", srbm_reset);
	WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset);
	(void)RREG32(R_000E60_SRBM_SOFT_RESET);
	udelay(50);
	WREG32(R_000E60_SRBM_SOFT_RESET, 0);
	(void)RREG32(R_000E60_SRBM_SOFT_RESET);
	WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset);
	(void)RREG32(R_000E60_SRBM_SOFT_RESET);
	udelay(50);
	WREG32(R_000E60_SRBM_SOFT_RESET, 0);
	(void)RREG32(R_000E60_SRBM_SOFT_RESET);
	/* Wait a little for things to settle down */
	udelay(50);
	dev_info(rdev->dev, "  R_008010_GRBM_STATUS=0x%08X\n",
		RREG32(R_008010_GRBM_STATUS));
	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2=0x%08X\n",
		RREG32(R_008014_GRBM_STATUS2));
	dev_info(rdev->dev, "  R_000E50_SRBM_STATUS=0x%08X\n",
		RREG32(R_000E50_SRBM_STATUS));
	/* After reset we need to reinit the asic as GPUs often end up in an
	 * incoherent state.
	 */
	atom_asic_init(rdev->mode_info.atom_context);
	rv515_mc_resume(rdev, &save);
	return 0;
}

854
int r600_gpu_reset(struct radeon_device *rdev)
854
int r600_gpu_reset(struct radeon_device *rdev)
855
{
855
{
856
	return r600_gpu_soft_reset(rdev);
856
	return r600_gpu_soft_reset(rdev);
857
}
857
}
858
 
858
 
859
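/* Build the pipe-to-backend map: spread the enabled render backends across
 * the tile pipes (2 bits per pipe), skipping any backend that is masked
 * off in backend_disable_mask.
 */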
static u32 r600_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
					     u32 num_backends,
					     u32 backend_disable_mask)
{
	u32 backend_map = 0;
	u32 enabled_backends_mask;
	u32 enabled_backends_count;
	u32 cur_pipe;
	u32 swizzle_pipe[R6XX_MAX_PIPES];
	u32 cur_backend;
	u32 i;

	if (num_tile_pipes > R6XX_MAX_PIPES)
		num_tile_pipes = R6XX_MAX_PIPES;
	if (num_tile_pipes < 1)
		num_tile_pipes = 1;
	if (num_backends > R6XX_MAX_BACKENDS)
		num_backends = R6XX_MAX_BACKENDS;
	if (num_backends < 1)
		num_backends = 1;

	enabled_backends_mask = 0;
	enabled_backends_count = 0;
	for (i = 0; i < R6XX_MAX_BACKENDS; ++i) {
		if (((backend_disable_mask >> i) & 1) == 0) {
			enabled_backends_mask |= (1 << i);
			++enabled_backends_count;
		}
		if (enabled_backends_count == num_backends)
			break;
	}

	if (enabled_backends_count == 0) {
		enabled_backends_mask = 1;
		enabled_backends_count = 1;
	}

	if (enabled_backends_count != num_backends)
		num_backends = enabled_backends_count;

	memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R6XX_MAX_PIPES);
	switch (num_tile_pipes) {
	case 1:
		swizzle_pipe[0] = 0;
		break;
	case 2:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		break;
	case 3:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		swizzle_pipe[2] = 2;
		break;
	case 4:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		swizzle_pipe[2] = 2;
		swizzle_pipe[3] = 3;
		break;
	case 5:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		swizzle_pipe[2] = 2;
		swizzle_pipe[3] = 3;
		swizzle_pipe[4] = 4;
		break;
	case 6:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 2;
		swizzle_pipe[2] = 4;
		swizzle_pipe[3] = 5;
		swizzle_pipe[4] = 1;
		swizzle_pipe[5] = 3;
		break;
	case 7:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 2;
		swizzle_pipe[2] = 4;
		swizzle_pipe[3] = 6;
		swizzle_pipe[4] = 1;
		swizzle_pipe[5] = 3;
		swizzle_pipe[6] = 5;
		break;
	case 8:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 2;
		swizzle_pipe[2] = 4;
		swizzle_pipe[3] = 6;
		swizzle_pipe[4] = 1;
		swizzle_pipe[5] = 3;
		swizzle_pipe[6] = 5;
		swizzle_pipe[7] = 7;
		break;
	}

	cur_backend = 0;
	for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
		while (((1 << cur_backend) & enabled_backends_mask) == 0)
			cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;

		backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2)));

		cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
	}

	return backend_map;
}

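/* Count the number of set bits in a 32-bit pipe/backend mask. */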
int r600_count_pipe_bits(uint32_t val)
{
	int i, ret = 0;

	for (i = 0; i < 32; i++) {
		ret += val & 1;
		val >>= 1;
	}
	return ret;
}

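/* Program the graphics block for the detected ASIC: per-family limits,
 * HDP init, tiling and pipe/backend configuration, SQ thread/GPR/stack
 * partitioning and the remaining default state. The 2D/3D drivers are
 * expected to adjust most of these defaults later.
 */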
void r600_gpu_init(struct radeon_device *rdev)
{
	u32 tiling_config;
	u32 ramcfg;
	u32 tmp;
	int i, j;
	u32 sq_config;
	u32 sq_gpr_resource_mgmt_1 = 0;
	u32 sq_gpr_resource_mgmt_2 = 0;
	u32 sq_thread_resource_mgmt = 0;
	u32 sq_stack_resource_mgmt_1 = 0;
	u32 sq_stack_resource_mgmt_2 = 0;

	/* FIXME: implement */
	switch (rdev->family) {
	case CHIP_R600:
		rdev->config.r600.max_pipes = 4;
		rdev->config.r600.max_tile_pipes = 8;
		rdev->config.r600.max_simds = 4;
		rdev->config.r600.max_backends = 4;
		rdev->config.r600.max_gprs = 256;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 256;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 16;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	case CHIP_RV630:
	case CHIP_RV635:
		rdev->config.r600.max_pipes = 2;
		rdev->config.r600.max_tile_pipes = 2;
		rdev->config.r600.max_simds = 3;
		rdev->config.r600.max_backends = 1;
		rdev->config.r600.max_gprs = 128;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 128;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 4;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		rdev->config.r600.max_pipes = 1;
		rdev->config.r600.max_tile_pipes = 1;
		rdev->config.r600.max_simds = 2;
		rdev->config.r600.max_backends = 1;
		rdev->config.r600.max_gprs = 128;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 128;
		rdev->config.r600.max_hw_contexts = 4;
		rdev->config.r600.max_gs_threads = 4;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 1;
		break;
	case CHIP_RV670:
		rdev->config.r600.max_pipes = 4;
		rdev->config.r600.max_tile_pipes = 4;
		rdev->config.r600.max_simds = 4;
		rdev->config.r600.max_backends = 4;
		rdev->config.r600.max_gprs = 192;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 256;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 16;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	default:
		break;
	}

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}

	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));

	/* Setup tiling */
	tiling_config = 0;
	ramcfg = RREG32(RAMCFG);
	switch (rdev->config.r600.max_tile_pipes) {
	case 1:
		tiling_config |= PIPE_TILING(0);
		break;
	case 2:
		tiling_config |= PIPE_TILING(1);
		break;
	case 4:
		tiling_config |= PIPE_TILING(2);
		break;
	case 8:
		tiling_config |= PIPE_TILING(3);
		break;
	default:
		break;
	}
	tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
	tiling_config |= GROUP_SIZE(0);
	tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
	if (tmp > 3) {
		tiling_config |= ROW_TILING(3);
		tiling_config |= SAMPLE_SPLIT(3);
	} else {
		tiling_config |= ROW_TILING(tmp);
		tiling_config |= SAMPLE_SPLIT(tmp);
	}
	tiling_config |= BANK_SWAPS(1);
	tmp = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes,
						rdev->config.r600.max_backends,
						(0xff << rdev->config.r600.max_backends) & 0xff);
	tiling_config |= BACKEND_MAP(tmp);
	WREG32(GB_TILING_CONFIG, tiling_config);
	WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
	WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);

	tmp = BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK);
	WREG32(CC_RB_BACKEND_DISABLE, tmp);

	/* Setup pipes */
	tmp = INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK);
	tmp |= INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK);
	WREG32(CC_GC_SHADER_PIPE_CONFIG, tmp);
	WREG32(GC_USER_SHADER_PIPE_CONFIG, tmp);

	tmp = R6XX_MAX_BACKENDS - r600_count_pipe_bits(tmp & INACTIVE_QD_PIPES_MASK);
	WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
	WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);

	/* Setup some CP states */
	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | ROQ_IB2_START(0x2b)));
	WREG32(CP_MEQ_THRESHOLDS, (MEQ_END(0x40) | ROQ_END(0x40)));

	WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | SYNC_GRADIENT |
			     SYNC_WALKER | SYNC_ALIGNER));
	/* Setup various GPU states */
	if (rdev->family == CHIP_RV670)
		WREG32(ARB_GDEC_RD_CNTL, 0x00000021);

	tmp = RREG32(SX_DEBUG_1);
	tmp |= SMX_EVENT_RELEASE;
	if (rdev->family > CHIP_R600)
		tmp |= ENABLE_NEW_SMX_ADDRESS;
	WREG32(SX_DEBUG_1, tmp);

	if (((rdev->family) == CHIP_R600) ||
	    ((rdev->family) == CHIP_RV630) ||
	    ((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780) ||
	    ((rdev->family) == CHIP_RS880)) {
		WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE);
	} else {
		WREG32(DB_DEBUG, 0);
	}
	WREG32(DB_WATERMARKS, (DEPTH_FREE(4) | DEPTH_CACHELINE_FREE(16) |
			       DEPTH_FLUSH(16) | DEPTH_PENDING_FREE(4)));

	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
	WREG32(VGT_NUM_INSTANCES, 0);

	WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(0));

	tmp = RREG32(SQ_MS_FIFO_SIZES);
	if (((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780) ||
	    ((rdev->family) == CHIP_RS880)) {
		tmp = (CACHE_FIFO_SIZE(0xa) |
		       FETCH_FIFO_HIWATER(0xa) |
		       DONE_FIFO_HIWATER(0xe0) |
		       ALU_UPDATE_FIFO_HIWATER(0x8));
	} else if (((rdev->family) == CHIP_R600) ||
		   ((rdev->family) == CHIP_RV630)) {
		tmp &= ~DONE_FIFO_HIWATER(0xff);
		tmp |= DONE_FIFO_HIWATER(0x4);
	}
	WREG32(SQ_MS_FIFO_SIZES, tmp);

	/* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
	 * should be adjusted as needed by the 2D/3D drivers.  This just sets default values
	 */
	sq_config = RREG32(SQ_CONFIG);
	sq_config &= ~(PS_PRIO(3) |
		       VS_PRIO(3) |
		       GS_PRIO(3) |
		       ES_PRIO(3));
	sq_config |= (DX9_CONSTS |
		      VC_ENABLE |
		      PS_PRIO(0) |
		      VS_PRIO(1) |
		      GS_PRIO(2) |
		      ES_PRIO(3));

	if ((rdev->family) == CHIP_R600) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(124) |
					  NUM_VS_GPRS(124) |
					  NUM_CLAUSE_TEMP_GPRS(4));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(0) |
					  NUM_ES_GPRS(0));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(136) |
					   NUM_VS_THREADS(48) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(4));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(128) |
					    NUM_VS_STACK_ENTRIES(128));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(0) |
					    NUM_ES_STACK_ENTRIES(0));
	} else if (((rdev->family) == CHIP_RV610) ||
		   ((rdev->family) == CHIP_RV620) ||
		   ((rdev->family) == CHIP_RS780) ||
		   ((rdev->family) == CHIP_RS880)) {
		/* no vertex cache */
		sq_config &= ~VC_ENABLE;

		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
					  NUM_ES_GPRS(17));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
					    NUM_VS_STACK_ENTRIES(40));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
					    NUM_ES_STACK_ENTRIES(16));
	} else if (((rdev->family) == CHIP_RV630) ||
		   ((rdev->family) == CHIP_RV635)) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(18) |
					  NUM_ES_GPRS(18));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
					    NUM_VS_STACK_ENTRIES(40));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
					    NUM_ES_STACK_ENTRIES(16));
	} else if ((rdev->family) == CHIP_RV670) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
					  NUM_ES_GPRS(17));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(64) |
					    NUM_VS_STACK_ENTRIES(64));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(64) |
					    NUM_ES_STACK_ENTRIES(64));
	}

	WREG32(SQ_CONFIG, sq_config);
	WREG32(SQ_GPR_RESOURCE_MGMT_1,  sq_gpr_resource_mgmt_1);
	WREG32(SQ_GPR_RESOURCE_MGMT_2,  sq_gpr_resource_mgmt_2);
	WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
	WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
	WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);

	if (((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780) ||
	    ((rdev->family) == CHIP_RS880)) {
		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY));
	} else {
		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC));
	}

	/* More default values. 2D/3D driver should adjust as needed */
	WREG32(PA_SC_AA_SAMPLE_LOCS_2S, (S0_X(0xc) | S0_Y(0x4) |
					 S1_X(0x4) | S1_Y(0xc)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_4S, (S0_X(0xe) | S0_Y(0xe) |
					 S1_X(0x2) | S1_Y(0x2) |
					 S2_X(0xa) | S2_Y(0x6) |
					 S3_X(0x6) | S3_Y(0xa)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD0, (S0_X(0xe) | S0_Y(0xb) |
					     S1_X(0x4) | S1_Y(0xc) |
					     S2_X(0x1) | S2_Y(0x6) |
					     S3_X(0xa) | S3_Y(0xe)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD1, (S4_X(0x6) | S4_Y(0x1) |
					     S5_X(0x0) | S5_Y(0x0) |
					     S6_X(0xb) | S6_Y(0x4) |
					     S7_X(0x7) | S7_Y(0x8)));

	WREG32(VGT_STRMOUT_EN, 0);
	tmp = rdev->config.r600.max_pipes * 16;
	switch (rdev->family) {
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		tmp += 32;
		break;
	case CHIP_RV670:
		tmp += 128;
		break;
	default:
		break;
	}
	if (tmp > 256) {
		tmp = 256;
	}
	WREG32(VGT_ES_PER_GS, 128);
	WREG32(VGT_GS_PER_ES, tmp);
	WREG32(VGT_GS_PER_VS, 2);
	WREG32(VGT_GS_VERTEX_REUSE, 16);

	/* more default values. 2D/3D driver should adjust as needed */
	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
	WREG32(VGT_STRMOUT_EN, 0);
	WREG32(SX_MISC, 0);
	WREG32(PA_SC_MODE_CNTL, 0);
	WREG32(PA_SC_AA_CONFIG, 0);
	WREG32(PA_SC_LINE_STIPPLE, 0);
	WREG32(SPI_INPUT_Z, 0);
	WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
	WREG32(CB_COLOR7_FRAG, 0);

	/* Clear render buffer base addresses */
	WREG32(CB_COLOR0_BASE, 0);
	WREG32(CB_COLOR1_BASE, 0);
	WREG32(CB_COLOR2_BASE, 0);
	WREG32(CB_COLOR3_BASE, 0);
	WREG32(CB_COLOR4_BASE, 0);
	WREG32(CB_COLOR5_BASE, 0);
	WREG32(CB_COLOR6_BASE, 0);
	WREG32(CB_COLOR7_BASE, 0);
	WREG32(CB_COLOR7_FRAG, 0);

	switch (rdev->family) {
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		tmp = TC_L2_SIZE(8);
		break;
	case CHIP_RV630:
	case CHIP_RV635:
		tmp = TC_L2_SIZE(4);
		break;
	case CHIP_R600:
		tmp = TC_L2_SIZE(0) | L2_DISABLE_LATE_HIT;
		break;
	default:
		tmp = TC_L2_SIZE(0);
		break;
	}
	WREG32(TC_CNTL, tmp);

	tmp = RREG32(HDP_HOST_PATH_CNTL);
	WREG32(HDP_HOST_PATH_CNTL, tmp);

	tmp = RREG32(ARB_POP);
	tmp |= ENABLE_TC128;
	WREG32(ARB_POP, tmp);

	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
	WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
			       NUM_CLIP_SEQ(3)));
	WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095));
}

/*
 * Indirect registers accessor
 */
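/* PCIE port registers are reached through an index/data pair; the dummy
 * read back of PCIE_PORT_INDEX posts the index write before the data
 * port is accessed.
 */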
u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
{
	u32 r;

	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
	(void)RREG32(PCIE_PORT_INDEX);
	r = RREG32(PCIE_PORT_DATA);
	return r;
}

void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
	(void)RREG32(PCIE_PORT_INDEX);
	WREG32(PCIE_PORT_DATA, (v));
	(void)RREG32(PCIE_PORT_DATA);
}

/*
 * CP & Ring
 */
void r600_cp_stop(struct radeon_device *rdev)
{
	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
}

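/* Fetch the PFP, ME and RLC microcode images for the detected family via
 * request_firmware() (using a temporary platform device) and validate
 * their sizes before the CP is started.
 */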
int r600_init_microcode(struct radeon_device *rdev)
{
	struct platform_device *pdev;
	const char *chip_name;
	const char *rlc_chip_name;
	size_t pfp_req_size, me_req_size, rlc_req_size;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
	err = IS_ERR(pdev);
	if (err) {
		printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
		return -EINVAL;
	}

	switch (rdev->family) {
	case CHIP_R600:
		chip_name = "R600";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV610:
		chip_name = "RV610";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV630:
		chip_name = "RV630";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV620:
		chip_name = "RV620";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV635:
		chip_name = "RV635";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV670:
		chip_name = "RV670";
		rlc_chip_name = "R600";
		break;
	case CHIP_RS780:
	case CHIP_RS880:
		chip_name = "RS780";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV770:
		chip_name = "RV770";
		rlc_chip_name = "R700";
		break;
	case CHIP_RV730:
	case CHIP_RV740:
		chip_name = "RV730";
		rlc_chip_name = "R700";
		break;
	case CHIP_RV710:
		chip_name = "RV710";
		rlc_chip_name = "R700";
		break;
	default: BUG();
	}

	if (rdev->family >= CHIP_RV770) {
		pfp_req_size = R700_PFP_UCODE_SIZE * 4;
		me_req_size = R700_PM4_UCODE_SIZE * 4;
		rlc_req_size = R700_RLC_UCODE_SIZE * 4;
	} else {
		pfp_req_size = PFP_UCODE_SIZE * 4;
		me_req_size = PM4_UCODE_SIZE * 12;
		rlc_req_size = RLC_UCODE_SIZE * 4;
	}

	DRM_INFO("Loading %s Microcode\n", chip_name);

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
	err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->pfp_fw->size != pfp_req_size) {
		printk(KERN_ERR
		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->pfp_fw->size, fw_name);
		err = -EINVAL;
		goto out;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
	err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->me_fw->size != me_req_size) {
		printk(KERN_ERR
		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->me_fw->size, fw_name);
		err = -EINVAL;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
	err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->rlc_fw->size != rlc_req_size) {
		printk(KERN_ERR
		       "r600_rlc: Bogus length %zu in firmware \"%s\"\n",
		       rdev->rlc_fw->size, fw_name);
		err = -EINVAL;
	}

out:
	platform_device_unregister(pdev);

	if (err) {
		if (err != -EINVAL)
			printk(KERN_ERR
			       "r600_cp: Failed to load firmware \"%s\"\n",
			       fw_name);
		release_firmware(rdev->pfp_fw);
		rdev->pfp_fw = NULL;
		release_firmware(rdev->me_fw);
		rdev->me_fw = NULL;
		release_firmware(rdev->rlc_fw);
		rdev->rlc_fw = NULL;
	}
	return err;
}

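/* Halt and soft-reset the CP, then stream the big-endian ME and PFP
 * microcode words into the CP ucode RAMs through their address/data ports.
 */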
static int r600_cp_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i;

	if (!rdev->me_fw || !rdev->pfp_fw)
		return -EINVAL;

	r600_cp_stop(rdev);

	WREG32(CP_RB_CNTL, RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));

	/* Reset cp */
	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);

	WREG32(CP_ME_RAM_WADDR, 0);

	fw_data = (const __be32 *)rdev->me_fw->data;
	WREG32(CP_ME_RAM_WADDR, 0);
	for (i = 0; i < PM4_UCODE_SIZE * 3; i++)
		WREG32(CP_ME_RAM_DATA,
		       be32_to_cpup(fw_data++));

	fw_data = (const __be32 *)rdev->pfp_fw->data;
	WREG32(CP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < PFP_UCODE_SIZE; i++)
		WREG32(CP_PFP_UCODE_DATA,
		       be32_to_cpup(fw_data++));

	WREG32(CP_PFP_UCODE_ADDR, 0);
	WREG32(CP_ME_RAM_WADDR, 0);
	WREG32(CP_ME_RAM_RADDR, 0);
	return 0;
}

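/* Emit the ME_INITIALIZE packet (the context count differs between the
 * R6xx and R7xx families) and release the micro engine from halt.
 */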
int r600_cp_start(struct radeon_device *rdev)
{
	int r;
	uint32_t cp_me;

	r = radeon_ring_lock(rdev, 7);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}
	radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
	radeon_ring_write(rdev, 0x1);
	if (rdev->family < CHIP_RV770) {
		radeon_ring_write(rdev, 0x3);
		radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
	} else {
		radeon_ring_write(rdev, 0x0);
		radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
	}
	radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, 0);
	radeon_ring_unlock_commit(rdev);

	cp_me = 0xff;
	WREG32(R_0086D8_CP_ME_CNTL, cp_me);
	return 0;
}

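/* Reset the CP, program the ring buffer registers (size, read/write
 * pointers, base address), then start the CP and run a ring test.
 */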
int r600_cp_resume(struct radeon_device *rdev)
{
	u32 tmp;
	u32 rb_bufsz;
	int r;

	/* Reset cp */
	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);

	/* Set ring buffer size */
	rb_bufsz = drm_order(rdev->cp.ring_size / 8);
	tmp = RB_NO_UPDATE | (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB_CNTL, tmp);
	WREG32(CP_SEM_WAIT_TIMER, 0x4);

	/* Set the write pointer delay */
	WREG32(CP_RB_WPTR_DELAY, 0);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
	WREG32(CP_RB_RPTR_WR, 0);
	WREG32(CP_RB_WPTR, 0);
	WREG32(CP_RB_RPTR_ADDR, rdev->cp.gpu_addr & 0xFFFFFFFF);
	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->cp.gpu_addr));
	mdelay(1);
	WREG32(CP_RB_CNTL, tmp);

	WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));

	rdev->cp.rptr = RREG32(CP_RB_RPTR);
	rdev->cp.wptr = RREG32(CP_RB_WPTR);

	r600_cp_start(rdev);
	rdev->cp.ready = true;
	r = radeon_ring_test(rdev);
	if (r) {
		rdev->cp.ready = false;
		return r;
	}
	return 0;
}

void r600_cp_commit(struct radeon_device *rdev)
{
	WREG32(CP_RB_WPTR, rdev->cp.wptr);
	(void)RREG32(CP_RB_WPTR);
}

void r600_ring_init(struct radeon_device *rdev, unsigned ring_size)
{
	u32 rb_bufsz;

	/* Align ring size */
	rb_bufsz = drm_order(ring_size / 8);
	ring_size = (1 << (rb_bufsz + 1)) * 4;
	rdev->cp.ring_size = ring_size;
	rdev->cp.align_mask = 16 - 1;
}

/*
 * GPU scratch registers helper functions.
 */
void r600_scratch_init(struct radeon_device *rdev)
{
	int i;

	rdev->scratch.num_reg = 7;
	for (i = 0; i < rdev->scratch.num_reg; i++) {
		rdev->scratch.free[i] = true;
		rdev->scratch.reg[i] = SCRATCH_REG0 + (i * 4);
	}
}

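/* Sanity-check the CP ring: write a magic value to a scratch register
 * through the ring and poll until the GPU echoes it back, or the
 * timeout expires.
 */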
int r600_ring_test(struct radeon_device *rdev)
{
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = radeon_scratch_get(rdev, &scratch);
	if (r) {
		DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	r = radeon_ring_lock(rdev, 3);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		radeon_scratch_free(rdev, scratch);
		return r;
	}
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(rdev, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
	radeon_ring_write(rdev, 0xDEADBEEF);
	radeon_ring_unlock_commit(rdev);
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ring test succeeded in %d usecs\n", i);
	} else {
		DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}
	radeon_scratch_free(rdev, scratch);
	return r;
}

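/* Emit a fence: write the sequence number to the fence scratch register
 * through the ring, flush HDP memory coherency, and raise a CP ring
 * buffer interrupt with a packet 0 write.
 */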
void r600_fence_ring_emit(struct radeon_device *rdev,
			  struct radeon_fence *fence)
{
	/* Also consider EVENT_WRITE_EOP; it handles the interrupts + timestamps + events */
	/* Emit fence sequence & fire IRQ */
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
	radeon_ring_write(rdev, fence->seq);
	radeon_ring_write(rdev, PACKET0(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0));
	radeon_ring_write(rdev, 1);
	/* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
	radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
	radeon_ring_write(rdev, RB_INT_STAT);
}

int r600_set_surface_reg(struct radeon_device *rdev, int reg,
			 uint32_t tiling_flags, uint32_t pitch,
			 uint32_t offset, uint32_t obj_size)
{
	/* FIXME: implement */
	return 0;
}

void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
{
	/* FIXME: implement */
}

bool r600_card_posted(struct radeon_device *rdev)
{
	uint32_t reg;

	/* first check CRTCs */
	reg = RREG32(D1CRTC_CONTROL) |
		RREG32(D2CRTC_CONTROL);
	if (reg & CRTC_EN)
		return true;

	/* then check MEM_SIZE, in case the crtcs are off */
	if (RREG32(CONFIG_MEMSIZE))
		return true;

	return false;
}

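/* Bring the ASIC up: load microcode if needed, program the memory
 * controller, enable AGP or the PCIE GART, init the GPU block and
 * start the CP.
 */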
int r600_startup(struct radeon_device *rdev)
{
	int r;

	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
		r = r600_init_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load firmware!\n");
			return r;
		}
	}

	r600_mc_program(rdev);
	if (rdev->flags & RADEON_IS_AGP) {
		r600_agp_enable(rdev);
	} else {
		r = r600_pcie_gart_enable(rdev);
		if (r)
			return r;
	}
	r600_gpu_init(rdev);

	r = radeon_ring_init(rdev, rdev->cp.ring_size);
	if (r)
		return r;
	r = r600_cp_load_microcode(rdev);
	if (r)
		return r;
	r = r600_cp_resume(rdev);
	if (r)
		return r;
	/* the write back buffer is not vital so don't worry about failure */
//	r600_wb_enable(rdev);
	return 0;
}

void r600_vga_set_state(struct radeon_device *rdev, bool state)
{
	uint32_t temp;

	temp = RREG32(CONFIG_CNTL);
	if (state == false) {
		temp &= ~(1<<0);
		temp |= (1<<1);
	} else {
		temp &= ~(1<<1);
	}
	WREG32(CONFIG_CNTL, temp);
}

/* The plan is to move initialization into this function and use
 * helper functions so that radeon_device_init does pretty much
 * nothing more than call asic-specific functions. This should
 * also allow us to remove a bunch of callback functions
 * like vram_info.
 */
int r600_init(struct radeon_device *rdev)
{
	int r;

	r = radeon_dummy_page_init(rdev);
	if (r)
		return r;
	if (r600_debugfs_mc_info_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for mc!\n");
	}
	/* This doesn't do much */
	r = radeon_gem_init(rdev);
	if (r)
		return r;
	/* Read BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	/* Must be an ATOMBIOS */
	if (!rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
		return -EINVAL;
	}
	r = radeon_atombios_init(rdev);
	if (r)
		return r;
	/* Post card if necessary */
	if (!r600_card_posted(rdev)) {
		if (!rdev->bios) {
			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
			return -EINVAL;
		}
		DRM_INFO("GPU not posted. posting now...\n");
		atom_asic_init(rdev->mode_info.atom_context);
	}
	/* Initialize scratch registers */
	r600_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	r = radeon_clocks_init(rdev);
	if (r)
		return r;
	/* Initialize power management */
	radeon_pm_init(rdev);
	/* Fence driver */
//	r = radeon_fence_driver_init(rdev);
//	if (r)
//		return r;
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r)
			radeon_agp_disable(rdev);
	}
	r = r600_mc_init(rdev);
	dbgprintf("mc vram location %x\n", rdev->mc.vram_location);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;

//	r = radeon_irq_kms_init(rdev);
//	if (r)
//		return r;

	rdev->cp.ring_obj = NULL;
	r600_ring_init(rdev, 1024 * 1024);

//	rdev->ih.ring_obj = NULL;
//	r600_ih_ring_init(rdev, 64 * 1024);

	r = r600_pcie_gart_init(rdev);
	if (r)
		return r;

	rdev->accel_working = true;
	r = r600_startup(rdev);
	if (r) {
//		r600_suspend(rdev);
//		r600_wb_fini(rdev);
//		radeon_ring_fini(rdev);
		r600_pcie_gart_fini(rdev);
		rdev->accel_working = false;
	}
	if (rdev->accel_working) {
//		r = radeon_ib_pool_init(rdev);
//		if (r) {
//			DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
//			rdev->accel_working = false;
//		}
//		r = r600_ib_test(rdev);
//		if (r) {
//			DRM_ERROR("radeon: failed testing IB (%d).\n", r);
//			rdev->accel_working = false;
//		}
	}
	return 0;
}
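/* Teardown sketch: a matching r600_fini() would unwind r600_init() in roughly
 * reverse order. The calls below mirror the mainline driver's fini path; this
 * port currently has no fini entry point, so names like radeon_agp_fini() and
 * radeon_bo_fini() are the mainline counterparts, not functions proven to be
 * compiled into this tree. */
static void r600_fini_sketch(struct radeon_device *rdev)
{
	r600_pcie_gart_fini(rdev);	/* undo r600_pcie_gart_init() */
	radeon_agp_fini(rdev);		/* undo radeon_agp_init() */
	radeon_bo_fini(rdev);		/* undo radeon_bo_init() */
	radeon_gem_fini(rdev);		/* undo radeon_gem_init() */
	radeon_clocks_fini(rdev);	/* undo radeon_clocks_init() */
	radeon_atombios_fini(rdev);	/* undo radeon_atombios_init() */
	kfree(rdev->bios);		/* BIOS copy from radeon_get_bios() */
	rdev->bios = NULL;
	radeon_dummy_page_fini(rdev);	/* undo radeon_dummy_page_init() */
}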


/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int r600_debugfs_cp_ring_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	unsigned count, i, j;

	radeon_ring_free_size(rdev);
	count = (rdev->cp.ring_size / 4) - rdev->cp.ring_free_dw;
	seq_printf(m, "CP_STAT 0x%08x\n", RREG32(CP_STAT));
	seq_printf(m, "CP_RB_WPTR 0x%08x\n", RREG32(CP_RB_WPTR));
	seq_printf(m, "CP_RB_RPTR 0x%08x\n", RREG32(CP_RB_RPTR));
	seq_printf(m, "driver's copy of the CP_RB_WPTR 0x%08x\n", rdev->cp.wptr);
	seq_printf(m, "driver's copy of the CP_RB_RPTR 0x%08x\n", rdev->cp.rptr);
	seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
	seq_printf(m, "%u dwords in ring\n", count);
	i = rdev->cp.rptr;
	for (j = 0; j <= count; j++) {
		seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
		i = (i + 1) & rdev->cp.ptr_mask;
	}
	return 0;
}
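/* The dump above iterates the ring with a mask instead of a modulo: the ring
 * size is a power of two, so the dword index wraps with a single AND against
 * ptr_mask (set to (ring_size / 4) - 1 by r600_ring_init()). A hypothetical
 * helper spelling out the arithmetic: */
static unsigned r600_ring_next_index_sketch(unsigned i, unsigned ptr_mask)
{
	/* e.g. an 8-dword ring has ptr_mask == 7: 6 -> 7 -> 0 -> 1 */
	return (i + 1) & ptr_mask;
}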

static int r600_debugfs_mc_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	DREG32_SYS(m, rdev, R_000E50_SRBM_STATUS);
	DREG32_SYS(m, rdev, VM_L2_STATUS);
	return 0;
}

static struct drm_info_list r600_mc_info_list[] = {
	{"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
	{"r600_ring_info", r600_debugfs_cp_ring_info, 0, NULL},
};
#endif

int r600_debugfs_mc_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, r600_mc_info_list, ARRAY_SIZE(r600_mc_info_list));
#else
	return 0;
#endif
}

/**
 * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl
 * rdev: radeon device structure
 * bo: buffer object struct which userspace is waiting for idle
 *
 * Some R6XX/R7XX chips don't seem to take into account an HDP flush
 * performed through the ring buffer, which leads to corruption in
 * rendering; see http://bugzilla.kernel.org/show_bug.cgi?id=15186.
 * To avoid this we perform the HDP flush directly by writing the
 * register through MMIO.
 */
void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
{
	WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
}
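/* Caller sketch: mainline's wait-idle ioctl first waits on the BO's fences,
 * then invokes this hook so the HDP read cache is flushed before userspace
 * inspects the memory. A hypothetical wrapper showing that ordering
 * (radeon_bo_wait() as in the mainline API): */
static void r600_wait_idle_sketch(struct radeon_device *rdev,
				  struct radeon_bo *bo)
{
	radeon_bo_wait(bo, NULL, false);	/* block until rendering completes */
	r600_ioctl_wait_idle(rdev, bo);		/* then flush HDP via MMIO */
}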