Subversion Repositories: Kolibri OS
r600.c, Rev 1313 → Rev 1321
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon.h"
#include "radeon_mode.h"
#include "r600d.h"
#include "atom.h"
#include "avivod.h"

#define PFP_UCODE_SIZE 576
#define PM4_UCODE_SIZE 1792
#define RLC_UCODE_SIZE 768
#define R700_PFP_UCODE_SIZE 848
#define R700_PM4_UCODE_SIZE 1360
#define R700_RLC_UCODE_SIZE 1024
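/* A note on the sizes above (an assumption from how the microcode upload
 * loops elsewhere in the driver consume them): they are lengths in 32-bit
 * dwords for the command processor's PFP (pre-fetch parser) and PM4/ME
 * (micro engine) images, plus the RLC images that are new in this revision
 * and are used by the r6xx/r7xx interrupt support.
 */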

/* Firmware Names */
MODULE_FIRMWARE("radeon/R600_pfp.bin");
MODULE_FIRMWARE("radeon/R600_me.bin");
MODULE_FIRMWARE("radeon/RV610_pfp.bin");
MODULE_FIRMWARE("radeon/RV610_me.bin");
MODULE_FIRMWARE("radeon/RV630_pfp.bin");
MODULE_FIRMWARE("radeon/RV630_me.bin");
MODULE_FIRMWARE("radeon/RV620_pfp.bin");
MODULE_FIRMWARE("radeon/RV620_me.bin");
MODULE_FIRMWARE("radeon/RV635_pfp.bin");
MODULE_FIRMWARE("radeon/RV635_me.bin");
MODULE_FIRMWARE("radeon/RV670_pfp.bin");
MODULE_FIRMWARE("radeon/RV670_me.bin");
MODULE_FIRMWARE("radeon/RS780_pfp.bin");
MODULE_FIRMWARE("radeon/RS780_me.bin");
MODULE_FIRMWARE("radeon/RV770_pfp.bin");
MODULE_FIRMWARE("radeon/RV770_me.bin");
MODULE_FIRMWARE("radeon/RV730_pfp.bin");
MODULE_FIRMWARE("radeon/RV730_me.bin");
MODULE_FIRMWARE("radeon/RV710_pfp.bin");
MODULE_FIRMWARE("radeon/RV710_me.bin");
MODULE_FIRMWARE("radeon/R600_rlc.bin");
MODULE_FIRMWARE("radeon/R700_rlc.bin");

int r600_debugfs_mc_info_init(struct radeon_device *rdev);

/* r600,rv610,rv630,rv620,rv635,rv670 */
int r600_mc_wait_for_idle(struct radeon_device *rdev);
void r600_gpu_init(struct radeon_device *rdev);
void r600_fini(struct radeon_device *rdev);

/* hpd for digital panel detect/disconnect */
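/* Rough shape of the HPD code below (assuming the usual DCE register
 * layout): every connector owns one hot-plug-detect line.  DCE3 parts
 * expose up to six DC_HPDx register blocks (HPD 5/6 only on DCE 3.2),
 * pre-DCE3 parts expose three DC_HOT_PLUG_DETECTx blocks, and sensing a
 * panel is just reading the SENSE bit of the matching status register.
 */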
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
	bool connected = false;

	if (ASIC_IS_DCE3(rdev)) {
		switch (hpd) {
		case RADEON_HPD_1:
			if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_2:
			if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_3:
			if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_4:
			if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
			/* DCE 3.2 */
		case RADEON_HPD_5:
			if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_6:
			if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		default:
			break;
		}
	} else {
		switch (hpd) {
		case RADEON_HPD_1:
			if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_2:
			if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_3:
			if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		default:
			break;
		}
	}
	return connected;
}
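/* Re-arming note: the polarity bit below is set against the state just
 * sensed, so the next HPD interrupt fires on the opposite transition
 * (unplug when connected, plug when disconnected).
 */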

void r600_hpd_set_polarity(struct radeon_device *rdev,
			   enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = r600_hpd_sense(rdev, hpd);

	if (ASIC_IS_DCE3(rdev)) {
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HPD1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HPD2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HPD3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD3_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_4:
			tmp = RREG32(DC_HPD4_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD4_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_5:
			tmp = RREG32(DC_HPD5_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
			break;
			/* DCE 3.2 */
		case RADEON_HPD_6:
			tmp = RREG32(DC_HPD6_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	} else {
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	}
}
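/* r600_hpd_init() programs each connector's HPD block (connection timer and
 * RX interrupt timer on DCE3, a bare enable bit before DCE3), flags the line
 * in rdev->irq.hpd[] and then lets r600_irq_set() enable the actual
 * interrupt sources.
 */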

void r600_hpd_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	if (ASIC_IS_DCE3(rdev)) {
		u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
		if (ASIC_IS_DCE32(rdev))
			tmp |= DC_HPDx_EN;

		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, tmp);
				rdev->irq.hpd[0] = true;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, tmp);
				rdev->irq.hpd[1] = true;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, tmp);
				rdev->irq.hpd[2] = true;
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, tmp);
				rdev->irq.hpd[3] = true;
				break;
				/* DCE 3.2 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, tmp);
				rdev->irq.hpd[4] = true;
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, tmp);
				rdev->irq.hpd[5] = true;
				break;
			default:
				break;
			}
		}
	} else {
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				rdev->irq.hpd[0] = true;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				rdev->irq.hpd[1] = true;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				rdev->irq.hpd[2] = true;
				break;
			default:
				break;
			}
		}
	}
	r600_irq_set(rdev);
}
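/* Teardown mirrors init: each HPD control register is cleared and the
 * corresponding rdev->irq.hpd[] slot is marked false so the IRQ code stops
 * honouring the line.
 */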

void r600_hpd_fini(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	if (ASIC_IS_DCE3(rdev)) {
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, 0);
				rdev->irq.hpd[0] = false;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, 0);
				rdev->irq.hpd[1] = false;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, 0);
				rdev->irq.hpd[2] = false;
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, 0);
				rdev->irq.hpd[3] = false;
				break;
				/* DCE 3.2 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, 0);
				rdev->irq.hpd[4] = false;
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, 0);
				rdev->irq.hpd[5] = false;
				break;
			default:
				break;
			}
		}
	} else {
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
				rdev->irq.hpd[0] = false;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
				rdev->irq.hpd[1] = false;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
				rdev->irq.hpd[2] = false;
				break;
			default:
				break;
			}
		}
	}
}

/*
 * R600 PCIE GART
 */
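/* Each GART entry is a 64-bit PTE, i.e. 8 bytes per GPU page: page i lives
 * at byte offset i * 8 in the table (see the writeq() below), and
 * r600_pcie_gart_init() sizes the table as num_gpu_pages * 8 accordingly.
 */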
int r600_gart_clear_page(struct radeon_device *rdev, int i)
{
	void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
	u64 pte;

	if (i < 0 || i >= rdev->gart.num_gpu_pages)
		return -EINVAL;
	pte = 0;
	writeq(pte, ((void __iomem *)ptr) + (i * 8));
	return 0;
}

void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
		if (tmp == 2) {
			printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
			return;
		}
		if (tmp) {
			return;
		}
		udelay(1);
	}
}
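/* Poll semantics above: RESPONSE_TYPE becoming non-zero ends the wait; a
 * value of 2 appears to signal a failed TLB flush, any other non-zero value
 * is treated as success, and the loop gives up silently after
 * rdev->usec_timeout iterations.
 */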

int r600_pcie_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.table.vram.robj) {
		WARN(1, "R600 PCIE GART already initialized.\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r)
		return r;
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
	return radeon_gart_table_vram_alloc(rdev);
}
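/* Enabling the GART then boils down to: program the VM L2 cache and every
 * L1 TLB client, window VM context 0 over the GTT address range, point its
 * page table at the table allocated in VRAM, route protection faults to the
 * dummy page, zero the remaining contexts and flush the TLB.
 */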

int r600_pcie_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r, i;

	if (rdev->gart.table.vram.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(rdev->dummy_page.addr >> 12));
	for (i = 1; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	r600_pcie_gart_tlb_flush(rdev);
	rdev->gart.ready = true;
	return 0;
}

void r600_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;
	int i, r;

	/* Disable all tables */
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	/* Disable L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup L1 TLB control */
	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	if (rdev->gart.table.vram.robj) {
//       radeon_object_kunmap(rdev->gart.table.vram.robj);
//       radeon_object_unpin(rdev->gart.table.vram.robj);
	}
}

void r600_pcie_gart_fini(struct radeon_device *rdev)
{
	r600_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
	radeon_gart_fini(rdev);
}

void r600_agp_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
}
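/* Note the AGP path programs the same L2/L1 TLB setup but leaves every VM
 * context disabled: with AGP the aperture translation is handled by the AGP
 * bridge rather than the GPU's page tables (an assumption based on the
 * VM_CONTEXT0 programming being absent here).
 */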

int r600_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(R_000E50_SRBM_STATUS) & 0x3F00;
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -1;
}
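/* r600_mc_program() must reprogram the memory controller while nothing is
 * using it: rv515_mc_stop() parks the display clients first, the VRAM/AGP
 * apertures and HDP windows are rewritten, and rv515_mc_resume() brings the
 * clients back afterwards.
 */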

static void r600_mc_program(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Lockout access through VGA aperture (doesn't exist before R600) */
	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	if (rdev->flags & RADEON_IS_AGP) {
		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
			/* VRAM before AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.vram_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.gtt_end >> 12);
		} else {
			/* VRAM after AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.gtt_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.vram_end >> 12);
		}
	} else {
		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
	}
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
	WREG32(HDP_NONSURFACE_SIZE, rdev->mc.mc_vram_size | 0x3FF);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
	} else {
		WREG32(MC_VM_AGP_BASE, 0);
		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	}
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	rv515_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it overwriting our objects */
	rv515_vga_render_disable(rdev);
}
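/* vram_width below ends up as (number of channels) x (channel size in
 * bits): CHMAP.NOOFCHAN selects 1/2/4/8 channels and RAMCFG a 16/32/64-bit
 * channel, so e.g. two 64-bit channels give a 128-bit bus.
 */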

int r600_mc_init(struct radeon_device *rdev)
{
	fixed20_12 a;
	u32 tmp;
	int chansize, numchan;
	int r;

	/* Get VRAM information */
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32(RAMCFG);
	if (tmp & CHANSIZE_OVERRIDE) {
		chansize = 16;
	} else if (tmp & CHANSIZE_MASK) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	tmp = RREG32(CHMAP);
	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	}
	rdev->mc.vram_width = numchan * chansize;
	/* Could aperture size report 0 ? */
	rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
	rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
	/* Setup GPU memory space */
	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);

	if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
		rdev->mc.mc_vram_size = rdev->mc.aper_size;

	if (rdev->mc.real_vram_size > rdev->mc.aper_size)
		rdev->mc.real_vram_size = rdev->mc.aper_size;

	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r)
			return r;
		/* gtt_size is setup by radeon_agp_init */
		rdev->mc.gtt_location = rdev->mc.agp_base;
		tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size;
		/* Try to put VRAM before or after AGP because we want
		 * SYSTEM_APERTURE to cover both VRAM and AGP, so the GPU
		 * can catch out-of-range VRAM/AGP accesses
		 */
		if (rdev->mc.gtt_location > rdev->mc.mc_vram_size) {
			/* Enough place before */
			rdev->mc.vram_location = rdev->mc.gtt_location -
							rdev->mc.mc_vram_size;
		} else if (tmp > rdev->mc.mc_vram_size) {
			/* Enough place after */
			rdev->mc.vram_location = rdev->mc.gtt_location +
							rdev->mc.gtt_size;
		} else {
			/* Try to set up VRAM first; AGP might not
			 * work on some cards
			 */
			rdev->mc.vram_location = 0x00000000UL;
			rdev->mc.gtt_location = rdev->mc.mc_vram_size;
		}
	} else {
		rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
		rdev->mc.vram_location = (RREG32(MC_VM_FB_LOCATION) &
							0xFFFF) << 24;
		tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size;
		if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) {
			/* Enough place after vram */
			rdev->mc.gtt_location = tmp;
		} else if (rdev->mc.vram_location >= rdev->mc.gtt_size) {
			/* Enough place before vram */
			rdev->mc.gtt_location = 0;
		} else {
			/* Not enough place before or after vram:
			 * shrink the gart size
			 */
			if (rdev->mc.vram_location > (0xFFFFFFFFUL - tmp)) {
				rdev->mc.gtt_location = 0;
				rdev->mc.gtt_size = rdev->mc.vram_location;
			} else {
				rdev->mc.gtt_location = tmp;
				rdev->mc.gtt_size = 0xFFFFFFFFUL - tmp;
			}
		}
		rdev->mc.gtt_location = rdev->mc.mc_vram_size;
	}
	rdev->mc.vram_start = rdev->mc.vram_location;
	rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
	rdev->mc.gtt_start = rdev->mc.gtt_location;
	rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
	/* FIXME: we should enforce default clock in case GPU is not in
	 * default setup
	 */
	a.full = rfixed_const(100);
	rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
	rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
	return 0;
}

/* We don't check whether the GPU really needs a reset; we simply do the
 * reset.  It's up to the caller to determine if the GPU needs one.  We
 * might add a helper function to check that.
 */
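/* Sequence sketch: halt the CP, pulse GRBM_SOFT_RESET for any busy render
 * blocks (and always for the CP itself), pulse SRBM_SOFT_RESET twice for
 * whichever system blocks (RLC, IH, VMC, MC, SEM, BIF, ...) still report
 * busy, then re-run atom_asic_init() since the GPU usually comes back in an
 * incoherent state.
 */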
int r600_gpu_soft_reset(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) |
				S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) |
				S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) |
				S_008010_SH_BUSY(1) | S_008010_SPI03_BUSY(1) |
				S_008010_SMX_BUSY(1) | S_008010_SC_BUSY(1) |
				S_008010_PA_BUSY(1) | S_008010_DB03_BUSY(1) |
				S_008010_CR_BUSY(1) | S_008010_CB03_BUSY(1) |
				S_008010_GUI_ACTIVE(1);
	u32 grbm2_busy_mask = S_008014_SPI0_BUSY(1) | S_008014_SPI1_BUSY(1) |
			S_008014_SPI2_BUSY(1) | S_008014_SPI3_BUSY(1) |
			S_008014_TA0_BUSY(1) | S_008014_TA1_BUSY(1) |
			S_008014_TA2_BUSY(1) | S_008014_TA3_BUSY(1) |
			S_008014_DB0_BUSY(1) | S_008014_DB1_BUSY(1) |
			S_008014_DB2_BUSY(1) | S_008014_DB3_BUSY(1) |
			S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) |
			S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
	u32 srbm_reset = 0;
	u32 tmp;

	dev_info(rdev->dev, "GPU softreset\n");
	dev_info(rdev->dev, "  R_008010_GRBM_STATUS=0x%08X\n",
		RREG32(R_008010_GRBM_STATUS));
	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2=0x%08X\n",
		RREG32(R_008014_GRBM_STATUS2));
	dev_info(rdev->dev, "  R_000E50_SRBM_STATUS=0x%08X\n",
		RREG32(R_000E50_SRBM_STATUS));
	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Disable CP parsing/prefetching */
	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(0xff));
	/* Check if any of the rendering block is busy and reset it */
	if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
	    (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
		tmp = S_008020_SOFT_RESET_CR(1) |
			S_008020_SOFT_RESET_DB(1) |
			S_008020_SOFT_RESET_CB(1) |
			S_008020_SOFT_RESET_PA(1) |
			S_008020_SOFT_RESET_SC(1) |
			S_008020_SOFT_RESET_SMX(1) |
			S_008020_SOFT_RESET_SPI(1) |
			S_008020_SOFT_RESET_SX(1) |
			S_008020_SOFT_RESET_SH(1) |
			S_008020_SOFT_RESET_TC(1) |
			S_008020_SOFT_RESET_TA(1) |
			S_008020_SOFT_RESET_VC(1) |
			S_008020_SOFT_RESET_VGT(1);
		dev_info(rdev->dev, "  R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(R_008020_GRBM_SOFT_RESET, tmp);
		(void)RREG32(R_008020_GRBM_SOFT_RESET);
		udelay(50);
		WREG32(R_008020_GRBM_SOFT_RESET, 0);
		(void)RREG32(R_008020_GRBM_SOFT_RESET);
	}
	/* Reset CP (we always reset CP) */
	tmp = S_008020_SOFT_RESET_CP(1);
	dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
	WREG32(R_008020_GRBM_SOFT_RESET, tmp);
	(void)RREG32(R_008020_GRBM_SOFT_RESET);
	udelay(50);
	WREG32(R_008020_GRBM_SOFT_RESET, 0);
	(void)RREG32(R_008020_GRBM_SOFT_RESET);
	/* Reset other GPU blocks if necessary */
	if (G_000E50_RLC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_RLC(1);
	if (G_000E50_GRBM_RQ_PENDING(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_GRBM(1);
	if (G_000E50_HI_RQ_PENDING(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_IH(1);
	if (G_000E50_VMC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_VMC(1);
	if (G_000E50_MCB_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_MC(1);
	if (G_000E50_MCDZ_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_MC(1);
	if (G_000E50_MCDY_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_MC(1);
	if (G_000E50_MCDX_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_MC(1);
	if (G_000E50_MCDW_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_MC(1);
	if (G_000E50_RLC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_RLC(1);
	if (G_000E50_SEM_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_SEM(1);
	if (G_000E50_BIF_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_BIF(1);
	dev_info(rdev->dev, "  R_000E60_SRBM_SOFT_RESET=0x%08X\n", srbm_reset);
	WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset);
	(void)RREG32(R_000E60_SRBM_SOFT_RESET);
	udelay(50);
	WREG32(R_000E60_SRBM_SOFT_RESET, 0);
	(void)RREG32(R_000E60_SRBM_SOFT_RESET);
	WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset);
	(void)RREG32(R_000E60_SRBM_SOFT_RESET);
	udelay(50);
	WREG32(R_000E60_SRBM_SOFT_RESET, 0);
	(void)RREG32(R_000E60_SRBM_SOFT_RESET);
	/* Wait a little for things to settle down */
	udelay(50);
	dev_info(rdev->dev, "  R_008010_GRBM_STATUS=0x%08X\n",
		RREG32(R_008010_GRBM_STATUS));
	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2=0x%08X\n",
		RREG32(R_008014_GRBM_STATUS2));
	dev_info(rdev->dev, "  R_000E50_SRBM_STATUS=0x%08X\n",
		RREG32(R_000E50_SRBM_STATUS));
	/* After the reset we need to reinit the asic as the GPU often ends up
	 * in an incoherent state.
	 */
	atom_asic_init(rdev->mode_info.atom_context);
	rv515_mc_resume(rdev, &save);
	return 0;
}

int r600_gpu_reset(struct radeon_device *rdev)
{
	return r600_gpu_soft_reset(rdev);
}
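/* The backend map packs a 2-bit render-backend index per tile pipe (hence
 * the "& 3" and "* 2" below), walking the chip-specific swizzled pipe order
 * and skipping any backends disabled by the mask.
 */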
574
 
853
 
575
static u32 r600_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
854
static u32 r600_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
576
					     u32 num_backends,
855
					     u32 num_backends,
577
					     u32 backend_disable_mask)
856
					     u32 backend_disable_mask)
578
{
857
{
579
	u32 backend_map = 0;
858
	u32 backend_map = 0;
580
	u32 enabled_backends_mask;
859
	u32 enabled_backends_mask;
581
	u32 enabled_backends_count;
860
	u32 enabled_backends_count;
582
	u32 cur_pipe;
861
	u32 cur_pipe;
583
	u32 swizzle_pipe[R6XX_MAX_PIPES];
862
	u32 swizzle_pipe[R6XX_MAX_PIPES];
584
	u32 cur_backend;
863
	u32 cur_backend;
585
	u32 i;
864
	u32 i;
586
 
865
 
587
	if (num_tile_pipes > R6XX_MAX_PIPES)
866
	if (num_tile_pipes > R6XX_MAX_PIPES)
588
		num_tile_pipes = R6XX_MAX_PIPES;
867
		num_tile_pipes = R6XX_MAX_PIPES;
589
	if (num_tile_pipes < 1)
868
	if (num_tile_pipes < 1)
590
		num_tile_pipes = 1;
869
		num_tile_pipes = 1;
591
	if (num_backends > R6XX_MAX_BACKENDS)
870
	if (num_backends > R6XX_MAX_BACKENDS)
592
		num_backends = R6XX_MAX_BACKENDS;
	if (num_backends < 1)
		num_backends = 1;

	enabled_backends_mask = 0;
	enabled_backends_count = 0;
	for (i = 0; i < R6XX_MAX_BACKENDS; ++i) {
		if (((backend_disable_mask >> i) & 1) == 0) {
			enabled_backends_mask |= (1 << i);
			++enabled_backends_count;
		}
		if (enabled_backends_count == num_backends)
			break;
	}

	if (enabled_backends_count == 0) {
		enabled_backends_mask = 1;
		enabled_backends_count = 1;
	}

	if (enabled_backends_count != num_backends)
		num_backends = enabled_backends_count;

	memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R6XX_MAX_PIPES);
	switch (num_tile_pipes) {
	case 1:
		swizzle_pipe[0] = 0;
		break;
	case 2:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		break;
	case 3:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		swizzle_pipe[2] = 2;
		break;
	case 4:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		swizzle_pipe[2] = 2;
		swizzle_pipe[3] = 3;
		break;
	case 5:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		swizzle_pipe[2] = 2;
		swizzle_pipe[3] = 3;
		swizzle_pipe[4] = 4;
		break;
	case 6:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 2;
		swizzle_pipe[2] = 4;
		swizzle_pipe[3] = 5;
		swizzle_pipe[4] = 1;
		swizzle_pipe[5] = 3;
		break;
	case 7:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 2;
		swizzle_pipe[2] = 4;
		swizzle_pipe[3] = 6;
		swizzle_pipe[4] = 1;
		swizzle_pipe[5] = 3;
		swizzle_pipe[6] = 5;
		break;
	case 8:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 2;
		swizzle_pipe[2] = 4;
		swizzle_pipe[3] = 6;
		swizzle_pipe[4] = 1;
		swizzle_pipe[5] = 3;
		swizzle_pipe[6] = 5;
		swizzle_pipe[7] = 7;
		break;
	}

	cur_backend = 0;
	for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
		while (((1 << cur_backend) & enabled_backends_mask) == 0)
			cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;

		backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2)));

		cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
	}

	return backend_map;
}
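
/* Count the set bits in a 32-bit value (a straightforward popcount);
 * the pipe/backend setup below uses it to turn hardware disable masks
 * into counts of active units.
 */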
int r600_count_pipe_bits(uint32_t val)
{
	int i, ret = 0;

	for (i = 0; i < 32; i++) {
		ret += val & 1;
		val >>= 1;
	}
	return ret;
}
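
/* One-time GPU block setup: per-family shader limits, HDP init, tiling
 * configuration, pipe/backend enables and the SQ resource split.  These
 * are conservative defaults; as the comments below note, the 2D/3D
 * drivers are expected to retune most of the SQ values.
 */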
void r600_gpu_init(struct radeon_device *rdev)
{
	u32 tiling_config;
	u32 ramcfg;
	u32 tmp;
	int i, j;
	u32 sq_config;
	u32 sq_gpr_resource_mgmt_1 = 0;
	u32 sq_gpr_resource_mgmt_2 = 0;
	u32 sq_thread_resource_mgmt = 0;
	u32 sq_stack_resource_mgmt_1 = 0;
	u32 sq_stack_resource_mgmt_2 = 0;

	/* FIXME: implement */
	switch (rdev->family) {
	case CHIP_R600:
		rdev->config.r600.max_pipes = 4;
		rdev->config.r600.max_tile_pipes = 8;
		rdev->config.r600.max_simds = 4;
		rdev->config.r600.max_backends = 4;
		rdev->config.r600.max_gprs = 256;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 256;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 16;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	case CHIP_RV630:
	case CHIP_RV635:
		rdev->config.r600.max_pipes = 2;
		rdev->config.r600.max_tile_pipes = 2;
		rdev->config.r600.max_simds = 3;
		rdev->config.r600.max_backends = 1;
		rdev->config.r600.max_gprs = 128;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 128;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 4;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		rdev->config.r600.max_pipes = 1;
		rdev->config.r600.max_tile_pipes = 1;
		rdev->config.r600.max_simds = 2;
		rdev->config.r600.max_backends = 1;
		rdev->config.r600.max_gprs = 128;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 128;
		rdev->config.r600.max_hw_contexts = 4;
		rdev->config.r600.max_gs_threads = 4;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 1;
		break;
	case CHIP_RV670:
		rdev->config.r600.max_pipes = 4;
		rdev->config.r600.max_tile_pipes = 4;
		rdev->config.r600.max_simds = 4;
		rdev->config.r600.max_backends = 4;
		rdev->config.r600.max_gprs = 192;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 256;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 16;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	default:
		break;
	}

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}

	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));

	/* Setup tiling */
	tiling_config = 0;
	ramcfg = RREG32(RAMCFG);
	switch (rdev->config.r600.max_tile_pipes) {
	case 1:
		tiling_config |= PIPE_TILING(0);
		break;
	case 2:
		tiling_config |= PIPE_TILING(1);
		break;
	case 4:
		tiling_config |= PIPE_TILING(2);
		break;
	case 8:
		tiling_config |= PIPE_TILING(3);
		break;
	default:
		break;
	}
	tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
	tiling_config |= GROUP_SIZE(0);
	tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
	if (tmp > 3) {
		tiling_config |= ROW_TILING(3);
		tiling_config |= SAMPLE_SPLIT(3);
	} else {
		tiling_config |= ROW_TILING(tmp);
		tiling_config |= SAMPLE_SPLIT(tmp);
	}
	tiling_config |= BANK_SWAPS(1);
	tmp = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes,
						rdev->config.r600.max_backends,
						(0xff << rdev->config.r600.max_backends) & 0xff);
	tiling_config |= BACKEND_MAP(tmp);
	WREG32(GB_TILING_CONFIG, tiling_config);
	WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
	WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);

	tmp = BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK);
	WREG32(CC_RB_BACKEND_DISABLE, tmp);

	/* Setup pipes */
	tmp = INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK);
	tmp |= INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK);
	WREG32(CC_GC_SHADER_PIPE_CONFIG, tmp);
	WREG32(GC_USER_SHADER_PIPE_CONFIG, tmp);

	tmp = R6XX_MAX_BACKENDS - r600_count_pipe_bits(tmp & INACTIVE_QD_PIPES_MASK);
	WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
	WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);

	/* Setup some CP states */
	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | ROQ_IB2_START(0x2b)));
	WREG32(CP_MEQ_THRESHOLDS, (MEQ_END(0x40) | ROQ_END(0x40)));

	WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | SYNC_GRADIENT |
			     SYNC_WALKER | SYNC_ALIGNER));
	/* Setup various GPU states */
	if (rdev->family == CHIP_RV670)
		WREG32(ARB_GDEC_RD_CNTL, 0x00000021);

	tmp = RREG32(SX_DEBUG_1);
	tmp |= SMX_EVENT_RELEASE;
	if ((rdev->family > CHIP_R600))
		tmp |= ENABLE_NEW_SMX_ADDRESS;
	WREG32(SX_DEBUG_1, tmp);

	if (((rdev->family) == CHIP_R600) ||
	    ((rdev->family) == CHIP_RV630) ||
	    ((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780) ||
	    ((rdev->family) == CHIP_RS880)) {
		WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE);
	} else {
		WREG32(DB_DEBUG, 0);
	}
	WREG32(DB_WATERMARKS, (DEPTH_FREE(4) | DEPTH_CACHELINE_FREE(16) |
			       DEPTH_FLUSH(16) | DEPTH_PENDING_FREE(4)));

	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
	WREG32(VGT_NUM_INSTANCES, 0);

	WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(0));

	tmp = RREG32(SQ_MS_FIFO_SIZES);
	if (((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780) ||
	    ((rdev->family) == CHIP_RS880)) {
		tmp = (CACHE_FIFO_SIZE(0xa) |
		       FETCH_FIFO_HIWATER(0xa) |
		       DONE_FIFO_HIWATER(0xe0) |
		       ALU_UPDATE_FIFO_HIWATER(0x8));
	} else if (((rdev->family) == CHIP_R600) ||
		   ((rdev->family) == CHIP_RV630)) {
		tmp &= ~DONE_FIFO_HIWATER(0xff);
		tmp |= DONE_FIFO_HIWATER(0x4);
	}
	WREG32(SQ_MS_FIFO_SIZES, tmp);

	/* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
	 * should be adjusted as needed by the 2D/3D drivers.  This just sets default values
	 */
	sq_config = RREG32(SQ_CONFIG);
	sq_config &= ~(PS_PRIO(3) |
		       VS_PRIO(3) |
		       GS_PRIO(3) |
		       ES_PRIO(3));
	sq_config |= (DX9_CONSTS |
		      VC_ENABLE |
		      PS_PRIO(0) |
		      VS_PRIO(1) |
		      GS_PRIO(2) |
		      ES_PRIO(3));

	if ((rdev->family) == CHIP_R600) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(124) |
					  NUM_VS_GPRS(124) |
					  NUM_CLAUSE_TEMP_GPRS(4));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(0) |
					  NUM_ES_GPRS(0));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(136) |
					   NUM_VS_THREADS(48) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(4));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(128) |
					    NUM_VS_STACK_ENTRIES(128));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(0) |
					    NUM_ES_STACK_ENTRIES(0));
	} else if (((rdev->family) == CHIP_RV610) ||
		   ((rdev->family) == CHIP_RV620) ||
		   ((rdev->family) == CHIP_RS780) ||
		   ((rdev->family) == CHIP_RS880)) {
		/* no vertex cache */
		sq_config &= ~VC_ENABLE;

		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
					  NUM_ES_GPRS(17));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
					    NUM_VS_STACK_ENTRIES(40));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
					    NUM_ES_STACK_ENTRIES(16));
	} else if (((rdev->family) == CHIP_RV630) ||
		   ((rdev->family) == CHIP_RV635)) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(18) |
					  NUM_ES_GPRS(18));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
					    NUM_VS_STACK_ENTRIES(40));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
					    NUM_ES_STACK_ENTRIES(16));
	} else if ((rdev->family) == CHIP_RV670) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
					  NUM_ES_GPRS(17));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(64) |
					    NUM_VS_STACK_ENTRIES(64));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(64) |
					    NUM_ES_STACK_ENTRIES(64));
	}

	WREG32(SQ_CONFIG, sq_config);
	WREG32(SQ_GPR_RESOURCE_MGMT_1,  sq_gpr_resource_mgmt_1);
	WREG32(SQ_GPR_RESOURCE_MGMT_2,  sq_gpr_resource_mgmt_2);
	WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
	WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
	WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);

	if (((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780) ||
	    ((rdev->family) == CHIP_RS880)) {
		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY));
	} else {
		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC));
	}

	/* More default values. 2D/3D driver should adjust as needed */
	WREG32(PA_SC_AA_SAMPLE_LOCS_2S, (S0_X(0xc) | S0_Y(0x4) |
					 S1_X(0x4) | S1_Y(0xc)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_4S, (S0_X(0xe) | S0_Y(0xe) |
					 S1_X(0x2) | S1_Y(0x2) |
					 S2_X(0xa) | S2_Y(0x6) |
					 S3_X(0x6) | S3_Y(0xa)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD0, (S0_X(0xe) | S0_Y(0xb) |
					     S1_X(0x4) | S1_Y(0xc) |
					     S2_X(0x1) | S2_Y(0x6) |
					     S3_X(0xa) | S3_Y(0xe)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD1, (S4_X(0x6) | S4_Y(0x1) |
					     S5_X(0x0) | S5_Y(0x0) |
					     S6_X(0xb) | S6_Y(0x4) |
					     S7_X(0x7) | S7_Y(0x8)));

	WREG32(VGT_STRMOUT_EN, 0);
	tmp = rdev->config.r600.max_pipes * 16;
	switch (rdev->family) {
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		tmp += 32;
		break;
	case CHIP_RV670:
		tmp += 128;
		break;
	default:
		break;
	}
	if (tmp > 256) {
		tmp = 256;
	}
	WREG32(VGT_ES_PER_GS, 128);
	WREG32(VGT_GS_PER_ES, tmp);
	WREG32(VGT_GS_PER_VS, 2);
	WREG32(VGT_GS_VERTEX_REUSE, 16);

	/* more default values. 2D/3D driver should adjust as needed */
	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
	WREG32(VGT_STRMOUT_EN, 0);
	WREG32(SX_MISC, 0);
	WREG32(PA_SC_MODE_CNTL, 0);
	WREG32(PA_SC_AA_CONFIG, 0);
	WREG32(PA_SC_LINE_STIPPLE, 0);
	WREG32(SPI_INPUT_Z, 0);
	WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
	WREG32(CB_COLOR7_FRAG, 0);

	/* Clear render buffer base addresses */
	WREG32(CB_COLOR0_BASE, 0);
	WREG32(CB_COLOR1_BASE, 0);
	WREG32(CB_COLOR2_BASE, 0);
	WREG32(CB_COLOR3_BASE, 0);
	WREG32(CB_COLOR4_BASE, 0);
	WREG32(CB_COLOR5_BASE, 0);
	WREG32(CB_COLOR6_BASE, 0);
	WREG32(CB_COLOR7_BASE, 0);
	WREG32(CB_COLOR7_FRAG, 0);

	switch (rdev->family) {
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		tmp = TC_L2_SIZE(8);
		break;
	case CHIP_RV630:
	case CHIP_RV635:
		tmp = TC_L2_SIZE(4);
		break;
	case CHIP_R600:
		tmp = TC_L2_SIZE(0) | L2_DISABLE_LATE_HIT;
		break;
	default:
		tmp = TC_L2_SIZE(0);
		break;
	}
	WREG32(TC_CNTL, tmp);

	tmp = RREG32(HDP_HOST_PATH_CNTL);
	WREG32(HDP_HOST_PATH_CNTL, tmp);

	tmp = RREG32(ARB_POP);
	tmp |= ENABLE_TC128;
	WREG32(ARB_POP, tmp);

	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
	WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
			       NUM_CLIP_SEQ(3)));
	WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095));
}


/*
 * Indirect registers accessor
 */
u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
{
	u32 r;

	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
	(void)RREG32(PCIE_PORT_INDEX);
	r = RREG32(PCIE_PORT_DATA);
	return r;
}

void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
	(void)RREG32(PCIE_PORT_INDEX);
	WREG32(PCIE_PORT_DATA, (v));
	(void)RREG32(PCIE_PORT_DATA);
}
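
/* Force pending CPU writes in the host data path (HDP) out to memory;
 * writing 1 to HDP_MEM_COHERENCY_FLUSH_CNTL kicks the flush.
 */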
void r600_hdp_flush(struct radeon_device *rdev)
{
	WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
}

/*
 * CP & Ring
 */
void r600_cp_stop(struct radeon_device *rdev)
{
	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
}
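
/* Bring the CP micro engine up: submit a ME_INITIALIZE packet (the
 * context count differs between r6xx and rv770), then un-halt the ME
 * via CP_ME_CNTL.
 */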
int r600_cp_start(struct radeon_device *rdev)
{
	int r;
	uint32_t cp_me;

	r = radeon_ring_lock(rdev, 7);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}
	radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
	radeon_ring_write(rdev, 0x1);
	if (rdev->family < CHIP_RV770) {
		radeon_ring_write(rdev, 0x3);
		radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
	} else {
		radeon_ring_write(rdev, 0x0);
		radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
	}
	radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, 0);
	radeon_ring_unlock_commit(rdev);

	cp_me = 0xff;
	WREG32(R_0086D8_CP_ME_CNTL, cp_me);
	return 0;
}
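
/* Publish the driver's ring write pointer to the CP; the read back
 * posts the register write before the CP starts fetching new packets.
 */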
void r600_cp_commit(struct radeon_device *rdev)
{
	WREG32(CP_RB_WPTR, rdev->cp.wptr);
	(void)RREG32(CP_RB_WPTR);
}
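
/* Ring sizes must be a power of two.  For example, a requested
 * 1024 * 1024 bytes gives drm_order(131072) == 17, so the ring ends up
 * as (1 << 18) * 4 == 1 MiB.
 */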
void r600_ring_init(struct radeon_device *rdev, unsigned ring_size)
{
	u32 rb_bufsz;

	/* Align ring size */
	rb_bufsz = drm_order(ring_size / 8);
	ring_size = (1 << (rb_bufsz + 1)) * 4;
	rdev->cp.ring_size = ring_size;
	rdev->cp.align_mask = 16 - 1;
}


/*
 * GPU scratch registers helper functions.
 */
void r600_scratch_init(struct radeon_device *rdev)
{
	int i;

	rdev->scratch.num_reg = 7;
	for (i = 0; i < rdev->scratch.num_reg; i++) {
		rdev->scratch.free[i] = true;
		rdev->scratch.reg[i] = SCRATCH_REG0 + (i * 4);
	}
}
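
/* Surface register handling is not implemented yet on this port; the
 * two hooks below are stubs (note the FIXMEs).
 */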
int r600_set_surface_reg(struct radeon_device *rdev, int reg,
			 uint32_t tiling_flags, uint32_t pitch,
			 uint32_t offset, uint32_t obj_size)
{
	/* FIXME: implement */
	return 0;
}

void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
{
	/* FIXME: implement */
}
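
/* A card counts as POSTed if the BIOS already brought it up: either a
 * CRTC is enabled, or the memory controller reports a non-zero size.
 */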
bool r600_card_posted(struct radeon_device *rdev)
{
	uint32_t reg;

	/* first check CRTCs */
	reg = RREG32(D1CRTC_CONTROL) |
		RREG32(D2CRTC_CONTROL);
	if (reg & CRTC_EN)
		return true;

	/* then check MEM_SIZE, in case the crtcs are off */
	if (RREG32(CONFIG_MEMSIZE))
		return true;

	return false;
}
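
/* Per-boot hardware bring-up: program the memory controller, enable
 * AGP or the PCIE GART, then run the big register setup in
 * r600_gpu_init().  CP, ring and writeback setup are still disabled in
 * this port (see the commented-out calls below).
 */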
int r600_startup(struct radeon_device *rdev)
{
	int r;

	r600_mc_program(rdev);
	if (rdev->flags & RADEON_IS_AGP) {
		r600_agp_enable(rdev);
	} else {
		r = r600_pcie_gart_enable(rdev);
		if (r)
			return r;
	}
	r600_gpu_init(rdev);

//	r = radeon_object_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
//			      &rdev->r600_blit.shader_gpu_addr);
//	if (r) {
//		DRM_ERROR("failed to pin blit object %d\n", r);
//		return r;
//	}

//	r = radeon_ring_init(rdev, rdev->cp.ring_size);
//	if (r)
//		return r;
//	r = r600_cp_load_microcode(rdev);
//	if (r)
//		return r;
//	r = r600_cp_resume(rdev);
//	if (r)
//		return r;
	/* write back buffers are not vital, so don't worry about failure */
//	r600_wb_enable(rdev);
	return 0;
}
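
/* Enable or disable VGA register decode for this card by toggling
 * CONFIG_CNTL bits 0 and 1.
 */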
void r600_vga_set_state(struct radeon_device *rdev, bool state)
{
	uint32_t temp;

	temp = RREG32(CONFIG_CNTL);
	if (state == false) {
		temp &= ~(1<<0);
		temp |= (1<<1);
	} else {
		temp &= ~(1<<1);
	}
	WREG32(CONFIG_CNTL, temp);
}


/* The plan is to move initialization into this function and use helper
 * functions, so that radeon_device_init does little more than call
 * asic-specific functions.  That should also allow us to remove a
 * bunch of callbacks like vram_info.
 */
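/* Rough order below: BIOS discovery and atombios init first, then
 * clocks, power management and the memory controller, and finally the
 * buffer manager and PCIE GART before r600_startup() touches the hw.
 */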
1260
int r600_init(struct radeon_device *rdev)
1543
int r600_init(struct radeon_device *rdev)
1261
{
1544
{
1262
	int r;
1545
	int r;
1263
 
1546
 
1264
	r = radeon_dummy_page_init(rdev);
1547
	r = radeon_dummy_page_init(rdev);
1265
	if (r)
1548
	if (r)
1266
		return r;
1549
		return r;
1267
	if (r600_debugfs_mc_info_init(rdev)) {
1550
	if (r600_debugfs_mc_info_init(rdev)) {
1268
		DRM_ERROR("Failed to register debugfs file for mc !\n");
1551
		DRM_ERROR("Failed to register debugfs file for mc !\n");
1269
	}
1552
	}
1270
	/* This don't do much */
1553
	/* This don't do much */
1271
	r = radeon_gem_init(rdev);
1554
	r = radeon_gem_init(rdev);
1272
	if (r)
1555
	if (r)
1273
		return r;
1556
		return r;
1274
	/* Read BIOS */
1557
	/* Read BIOS */
1275
	if (!radeon_get_bios(rdev)) {
1558
	if (!radeon_get_bios(rdev)) {
1276
		if (ASIC_IS_AVIVO(rdev))
1559
		if (ASIC_IS_AVIVO(rdev))
1277
			return -EINVAL;
1560
			return -EINVAL;
1278
	}
1561
	}
1279
	/* Must be an ATOMBIOS */
1562
	/* Must be an ATOMBIOS */
1280
	if (!rdev->is_atom_bios) {
1563
	if (!rdev->is_atom_bios) {
1281
		dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
1564
		dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
1282
		return -EINVAL;
1565
		return -EINVAL;
1283
	}
1566
	}
1284
	r = radeon_atombios_init(rdev);
1567
	r = radeon_atombios_init(rdev);
1285
	if (r)
1568
	if (r)
1286
		return r;
1569
		return r;
1287
	/* Post card if necessary */
1570
	/* Post card if necessary */
1288
	if (!r600_card_posted(rdev) && rdev->bios) {
1571
	if (!r600_card_posted(rdev)) {
-
 
1572
		if (!rdev->bios) {
-
 
1573
			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
-
 
1574
			return -EINVAL;
-
 
1575
		}
1289
		DRM_INFO("GPU not posted. posting now...\n");
1576
		DRM_INFO("GPU not posted. posting now...\n");
1290
		atom_asic_init(rdev->mode_info.atom_context);
1577
		atom_asic_init(rdev->mode_info.atom_context);
1291
	}
1578
	}
1292
	/* Initialize scratch registers */
1579
	/* Initialize scratch registers */
1293
	r600_scratch_init(rdev);
1580
	r600_scratch_init(rdev);
1294
	/* Initialize surface registers */
1581
	/* Initialize surface registers */
1295
	radeon_surface_init(rdev);
1582
	radeon_surface_init(rdev);
1296
	/* Initialize clocks */
1583
	/* Initialize clocks */
1297
	radeon_get_clock_info(rdev->ddev);
1584
	radeon_get_clock_info(rdev->ddev);
1298
	r = radeon_clocks_init(rdev);
1585
	r = radeon_clocks_init(rdev);
1299
	if (r)
1586
	if (r)
1300
		return r;
1587
		return r;
1301
	/* Initialize power management */
1588
	/* Initialize power management */
1302
	radeon_pm_init(rdev);
1589
	radeon_pm_init(rdev);
1303
	/* Fence driver */
1590
	/* Fence driver */
1304
//	r = radeon_fence_driver_init(rdev);
1591
//	r = radeon_fence_driver_init(rdev);
1305
//	if (r)
1592
//	if (r)
1306
//		return r;
1593
//		return r;
1307
	r = r600_mc_init(rdev);
1594
	r = r600_mc_init(rdev);
1308
    dbgprintf("mc vram location %x\n", rdev->mc.vram_location);
1595
    dbgprintf("mc vram location %x\n", rdev->mc.vram_location);
1309
	if (r)
1596
	if (r)
1310
		return r;
1597
		return r;
1311
	/* Memory manager */
1598
	/* Memory manager */
1312
	r = radeon_object_init(rdev);
1599
	r = radeon_bo_init(rdev);
1313
	if (r)
1600
	if (r)
1314
		return r;
1601
		return r;
-
 
1602
 
-
 
1603
//	r = radeon_irq_kms_init(rdev);
-
 
1604
//	if (r)
-
 
1605
//		return r;
-
 
1606
 
1315
//	rdev->cp.ring_obj = NULL;
1607
//	rdev->cp.ring_obj = NULL;
1316
//	r600_ring_init(rdev, 1024 * 1024);
1608
//	r600_ring_init(rdev, 1024 * 1024);
1317
 
1609
 
1318
//	if (!rdev->me_fw || !rdev->pfp_fw) {
1610
//	rdev->ih.ring_obj = NULL;
1319
//		r = r600_cp_init_microcode(rdev);
-
 
1320
//		if (r) {
-
 
1321
//			DRM_ERROR("Failed to load firmware!\n");
-
 
1322
//			return r;
-
 
1323
//		}
-
 
1324
//	}
1611
//	r600_ih_ring_init(rdev, 64 * 1024);
1325
 
1612
 
1326
	r = r600_pcie_gart_init(rdev);
1613
	r = r600_pcie_gart_init(rdev);
1327
	if (r)
1614
	if (r)
1328
		return r;
1615
		return r;
1329
 
-
 
1330
	rdev->accel_working = true;
1616
 
1331
//	r = r600_blit_init(rdev);
1617
//	r = r600_blit_init(rdev);
1332
//	if (r) {
1618
//	if (r) {
1333
//		DRM_ERROR("radeon: failled blitter (%d).\n", r);
1619
//		DRM_ERROR("radeon: failled blitter (%d).\n", r);
1334
//		return r;
1620
//		return r;
1335
//	}
1621
//	}
-
 
1622
 
1336
 
1623
	rdev->accel_working = true;
1337
	r = r600_startup(rdev);
1624
	r = r600_startup(rdev);
1338
	if (r) {
1625
	if (r) {
1339
//		r600_suspend(rdev);
1626
//		r600_suspend(rdev);
1340
//		r600_wb_fini(rdev);
1627
//		r600_wb_fini(rdev);
1341
//		radeon_ring_fini(rdev);
1628
//		radeon_ring_fini(rdev);
1342
		r600_pcie_gart_fini(rdev);
1629
		r600_pcie_gart_fini(rdev);
1343
		rdev->accel_working = false;
1630
		rdev->accel_working = false;
1344
	}
1631
	}
1345
	if (rdev->accel_working) {
1632
	if (rdev->accel_working) {
1346
//		r = radeon_ib_pool_init(rdev);
1633
//		r = radeon_ib_pool_init(rdev);
1347
//		if (r) {
1634
//		if (r) {
1348
//			DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r);
1635
//			DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r);
1349
//			rdev->accel_working = false;
1636
//			rdev->accel_working = false;
1350
//		}
1637
//		}
1351
//		r = r600_ib_test(rdev);
1638
//		r = r600_ib_test(rdev);
1352
//		if (r) {
1639
//		if (r) {
1353
//			DRM_ERROR("radeon: failled testing IB (%d).\n", r);
1640
//			DRM_ERROR("radeon: failled testing IB (%d).\n", r);
1354
//			rdev->accel_working = false;
1641
//			rdev->accel_working = false;
1355
//		}
1642
//		}
1356
	}
1643
	}
1357
	return 0;
1644
	return 0;
1358
}
1645
}
1359
 
1646
 
1360
 
1647
 
1361
 
1648
 
1362
 
1649
 
1363
 
1650
 
1364
 
1651
 
1365
 
1652
 
1366
 
1653
 
1367
 
1654
 
1368
/*
1655
/*
1369
 * Debugfs info
1656
 * Debugfs info
1370
 */
1657
 */
1371
#if defined(CONFIG_DEBUG_FS)
1658
#if defined(CONFIG_DEBUG_FS)
1372
 
1659
 
1373
static int r600_debugfs_cp_ring_info(struct seq_file *m, void *data)
1660
static int r600_debugfs_cp_ring_info(struct seq_file *m, void *data)
1374
{
1661
{
1375
	struct drm_info_node *node = (struct drm_info_node *) m->private;
1662
	struct drm_info_node *node = (struct drm_info_node *) m->private;
1376
	struct drm_device *dev = node->minor->dev;
1663
	struct drm_device *dev = node->minor->dev;
1377
	struct radeon_device *rdev = dev->dev_private;
1664
	struct radeon_device *rdev = dev->dev_private;
1378
	uint32_t rdp, wdp;
-
 
1379
	unsigned count, i, j;
1665
	unsigned count, i, j;
1380
 
1666
 
1381
	radeon_ring_free_size(rdev);
1667
	radeon_ring_free_size(rdev);
1382
	rdp = RREG32(CP_RB_RPTR);
-
 
1383
	wdp = RREG32(CP_RB_WPTR);
-
 
1384
	count = (rdp + rdev->cp.ring_size - wdp) & rdev->cp.ptr_mask;
1668
	count = (rdev->cp.ring_size / 4) - rdev->cp.ring_free_dw;
1385
	seq_printf(m, "CP_STAT 0x%08x\n", RREG32(CP_STAT));
1669
	seq_printf(m, "CP_STAT 0x%08x\n", RREG32(CP_STAT));
1386
	seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp);
1670
	seq_printf(m, "CP_RB_WPTR 0x%08x\n", RREG32(CP_RB_WPTR));
1387
	seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp);
1671
	seq_printf(m, "CP_RB_RPTR 0x%08x\n", RREG32(CP_RB_RPTR));
-
 
1672
	seq_printf(m, "driver's copy of the CP_RB_WPTR 0x%08x\n", rdev->cp.wptr);
-
 
1673
	seq_printf(m, "driver's copy of the CP_RB_RPTR 0x%08x\n", rdev->cp.rptr);
1388
	seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
1674
	seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
1389
	seq_printf(m, "%u dwords in ring\n", count);
1675
	seq_printf(m, "%u dwords in ring\n", count);
-
 
1676
	i = rdev->cp.rptr;
1390
	for (j = 0; j <= count; j++) {
1677
	for (j = 0; j <= count; j++) {
1391
		i = (rdp + j) & rdev->cp.ptr_mask;
-
 
1392
		seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
1678
		seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
-
 
1679
		i = (i + 1) & rdev->cp.ptr_mask;
1393
	}
1680
	}
1394
	return 0;
1681
	return 0;
1395
}
1682
}
1396
 
1683
 
1397
static int r600_debugfs_mc_info(struct seq_file *m, void *data)
1684
static int r600_debugfs_mc_info(struct seq_file *m, void *data)
1398
{
1685
{
1399
	struct drm_info_node *node = (struct drm_info_node *) m->private;
1686
	struct drm_info_node *node = (struct drm_info_node *) m->private;
1400
	struct drm_device *dev = node->minor->dev;
1687
	struct drm_device *dev = node->minor->dev;
1401
	struct radeon_device *rdev = dev->dev_private;
1688
	struct radeon_device *rdev = dev->dev_private;
1402
 
1689
 
1403
	DREG32_SYS(m, rdev, R_000E50_SRBM_STATUS);
1690
	DREG32_SYS(m, rdev, R_000E50_SRBM_STATUS);
1404
	DREG32_SYS(m, rdev, VM_L2_STATUS);
1691
	DREG32_SYS(m, rdev, VM_L2_STATUS);
1405
	return 0;
1692
	return 0;
1406
}
1693
}
1407
 
1694
 
1408
static struct drm_info_list r600_mc_info_list[] = {
1695
static struct drm_info_list r600_mc_info_list[] = {
1409
	{"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
1696
	{"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
1410
	{"r600_ring_info", r600_debugfs_cp_ring_info, 0, NULL},
1697
	{"r600_ring_info", r600_debugfs_cp_ring_info, 0, NULL},
1411
};
1698
};
1412
#endif
1699
#endif
1413
 
1700
 
1414
int r600_debugfs_mc_info_init(struct radeon_device *rdev)
1701
int r600_debugfs_mc_info_init(struct radeon_device *rdev)
1415
{
1702
{
1416
#if defined(CONFIG_DEBUG_FS)
1703
#if defined(CONFIG_DEBUG_FS)
1417
	return radeon_debugfs_add_files(rdev, r600_mc_info_list, ARRAY_SIZE(r600_mc_info_list));
1704
	return radeon_debugfs_add_files(rdev, r600_mc_info_list, ARRAY_SIZE(r600_mc_info_list));
1418
#else
1705
#else
1419
	return 0;
1706
	return 0;
1420
#endif
1707
#endif
1421
}
1708
}
1422
>
1709
>
1423
>
1710
>
1424
>
1711
>
1425
>
1712
>
1426
>
1713
>
1427
>
1714
>