/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon.h"
#include "radeon_mode.h"
#include "r600d.h"
#include "atom.h"
#include "avivod.h"

#define PFP_UCODE_SIZE 576
#define PM4_UCODE_SIZE 1792
#define RLC_UCODE_SIZE 768
#define R700_PFP_UCODE_SIZE 848
#define R700_PM4_UCODE_SIZE 1360
#define R700_RLC_UCODE_SIZE 1024
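
/* These sizes are in 32-bit words.  The byte-size checks in
 * r600_init_microcode() follow from that: PFP and RLC images are SIZE * 4
 * bytes, while the r6xx ME (PM4) image is SIZE * 12 bytes since each PM4
 * micro-instruction is three words wide (hence the PM4_UCODE_SIZE * 3 load
 * loop in r600_cp_load_microcode()). */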

/* Firmware Names */
MODULE_FIRMWARE("radeon/R600_pfp.bin");
MODULE_FIRMWARE("radeon/R600_me.bin");
MODULE_FIRMWARE("radeon/RV610_pfp.bin");
MODULE_FIRMWARE("radeon/RV610_me.bin");
MODULE_FIRMWARE("radeon/RV630_pfp.bin");
MODULE_FIRMWARE("radeon/RV630_me.bin");
MODULE_FIRMWARE("radeon/RV620_pfp.bin");
MODULE_FIRMWARE("radeon/RV620_me.bin");
MODULE_FIRMWARE("radeon/RV635_pfp.bin");
MODULE_FIRMWARE("radeon/RV635_me.bin");
MODULE_FIRMWARE("radeon/RV670_pfp.bin");
MODULE_FIRMWARE("radeon/RV670_me.bin");
MODULE_FIRMWARE("radeon/RS780_pfp.bin");
MODULE_FIRMWARE("radeon/RS780_me.bin");
MODULE_FIRMWARE("radeon/RV770_pfp.bin");
MODULE_FIRMWARE("radeon/RV770_me.bin");
MODULE_FIRMWARE("radeon/RV730_pfp.bin");
MODULE_FIRMWARE("radeon/RV730_me.bin");
MODULE_FIRMWARE("radeon/RV710_pfp.bin");
MODULE_FIRMWARE("radeon/RV710_me.bin");
MODULE_FIRMWARE("radeon/R600_rlc.bin");
MODULE_FIRMWARE("radeon/R700_rlc.bin");
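
/* Every ASIC needs a matching PFP + ME image pair for the command processor,
 * plus one per-generation RLC image (R600 for r6xx, R700 for r7xx); all
 * three are validated against the expected sizes in r600_init_microcode(). */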

int r600_debugfs_mc_info_init(struct radeon_device *rdev);

/* r600,rv610,rv630,rv620,rv635,rv670 */
int r600_mc_wait_for_idle(struct radeon_device *rdev);
void r600_gpu_init(struct radeon_device *rdev);
void r600_fini(struct radeon_device *rdev);

/* hpd for digital panel detect/disconnect */
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
	bool connected = false;

	if (ASIC_IS_DCE3(rdev)) {
		switch (hpd) {
		case RADEON_HPD_1:
			if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_2:
			if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_3:
			if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_4:
			if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
			/* DCE 3.2 */
		case RADEON_HPD_5:
			if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_6:
			if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		default:
			break;
		}
	} else {
		switch (hpd) {
		case RADEON_HPD_1:
			if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_2:
			if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_3:
			if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		default:
			break;
		}
	}
	return connected;
}
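
/* Polarity handling (as implemented below): after sensing the current state,
 * DC_HPDx_INT_POLARITY is cleared for connected pads and set for disconnected
 * ones, so the next hot plug interrupt fires on the opposite transition. */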

void r600_hpd_set_polarity(struct radeon_device *rdev,
			   enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = r600_hpd_sense(rdev, hpd);

	if (ASIC_IS_DCE3(rdev)) {
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HPD1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HPD2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HPD3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD3_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_4:
			tmp = RREG32(DC_HPD4_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD4_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_5:
			tmp = RREG32(DC_HPD5_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
			break;
			/* DCE 3.2 */
		case RADEON_HPD_6:
			tmp = RREG32(DC_HPD6_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	} else {
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	}
}

void r600_hpd_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	if (ASIC_IS_DCE3(rdev)) {
		u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
		if (ASIC_IS_DCE32(rdev))
			tmp |= DC_HPDx_EN;

		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, tmp);
//               rdev->irq.hpd[0] = true;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, tmp);
//               rdev->irq.hpd[1] = true;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, tmp);
//               rdev->irq.hpd[2] = true;
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, tmp);
//               rdev->irq.hpd[3] = true;
				break;
				/* DCE 3.2 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, tmp);
//               rdev->irq.hpd[4] = true;
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, tmp);
//               rdev->irq.hpd[5] = true;
				break;
			default:
				break;
			}
		}
	} else {
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
//               rdev->irq.hpd[0] = true;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
//               rdev->irq.hpd[1] = true;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
//               rdev->irq.hpd[2] = true;
				break;
			default:
				break;
			}
		}
	}
//   if (rdev->irq.installed)
//   r600_irq_set(rdev);
}
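
/* Note: this port only programs the HPD pad enable/timer registers above;
 * the interrupt bookkeeping (the rdev->irq.hpd[] flags and the final
 * r600_irq_set() call) is stubbed out until IRQ support is wired up. */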

void r600_hpd_fini(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	if (ASIC_IS_DCE3(rdev)) {
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, 0);
//               rdev->irq.hpd[0] = false;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, 0);
//               rdev->irq.hpd[1] = false;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, 0);
//               rdev->irq.hpd[2] = false;
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, 0);
//               rdev->irq.hpd[3] = false;
				break;
				/* DCE 3.2 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, 0);
//               rdev->irq.hpd[4] = false;
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, 0);
//               rdev->irq.hpd[5] = false;
				break;
			default:
				break;
			}
		}
	} else {
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
//               rdev->irq.hpd[0] = false;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
//               rdev->irq.hpd[1] = false;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
//               rdev->irq.hpd[2] = false;
				break;
			default:
				break;
			}
		}
	}
}

/*
 * R600 PCIE GART
 */
void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	/* flush hdp cache so updates hit vram */
	WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
	for (i = 0; i < rdev->usec_timeout; i++) {
		/* poll the invalidation response; 2 means the flush failed */
		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
		if (tmp == 2) {
			printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
			return;
		}
		if (tmp) {
			return;
		}
		udelay(1);
	}
}

int r600_pcie_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.table.vram.robj) {
		WARN(1, "R600 PCIE GART already initialized.\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r)
		return r;
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
	return radeon_gart_table_vram_alloc(rdev);
}
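
/* The table size above assumes one 8-byte entry per GPU page; the table
 * itself is then carved out of VRAM by radeon_gart_table_vram_alloc(). */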

int r600_pcie_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r, i;

	if (rdev->gart.table.vram.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(rdev->dummy_page.addr >> 12));
	for (i = 1; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	r600_pcie_gart_tlb_flush(rdev);
	rdev->gart.ready = true;
	return 0;
}

void r600_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;
	int i, r;

	/* Disable all tables */
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	/* Disable L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup L1 TLB control */
	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	if (rdev->gart.table.vram.robj) {
		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(rdev->gart.table.vram.robj);
			radeon_bo_unpin(rdev->gart.table.vram.robj);
			radeon_bo_unreserve(rdev->gart.table.vram.robj);
		}
	}
}

void r600_pcie_gart_fini(struct radeon_device *rdev)
{
	r600_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
	radeon_gart_fini(rdev);
}

void r600_agp_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
}

int r600_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read the MC busy bits from SRBM_STATUS */
		tmp = RREG32(R_000E50_SRBM_STATUS) & 0x3F00;
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -1;
}
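
/* The 0x3F00 mask above is read here as covering the memory controller busy
 * flags of SRBM_STATUS (cf. the G_000E50_MC*_BUSY() checks in
 * r600_gpu_soft_reset() below); idle means all of them are clear. */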

static void r600_mc_program(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Lockout access through VGA aperture (doesn't exist before R600) */
	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	if (rdev->flags & RADEON_IS_AGP) {
		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
			/* VRAM before AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.vram_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.gtt_end >> 12);
		} else {
			/* VRAM after AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.gtt_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.vram_end >> 12);
		}
	} else {
		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
	}
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
	WREG32(HDP_NONSURFACE_SIZE, rdev->mc.mc_vram_size | 0x3FF);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
	} else {
		WREG32(MC_VM_AGP_BASE, 0);
		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	}
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	rv515_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it overwriting our objects */
	rv515_vga_render_disable(rdev);
}

/**
 * r600_vram_gtt_location - try to find VRAM & GTT location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Try to place VRAM at the same address in the GPU's address space as it
 * occupies in the CPU (PCI) address space, since some GPUs misbehave when
 * VRAM is reprogrammed to a different address.
 *
 * If there is not enough space to fit the invisible VRAM after the
 * aperture, the VRAM size is limited to the aperture.
 *
 * If we are using AGP, VRAM is placed adjacent to the AGP aperture: the two
 * must form one contiguous range from the GPU's point of view so that the
 * GPU can be programmed to catch accesses outside them (weird GPU policy).
 *
 * This function never fails; in the worst case VRAM or GTT is limited.
 *
 * Note: GTT start, end, and size should be initialized before calling this
 * function on an AGP platform.
 */
void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_bf, size_af;

	if (mc->mc_vram_size > 0xE0000000) {
		/* leave room for at least 512M GTT */
		dev_warn(rdev->dev, "limiting VRAM\n");
		mc->real_vram_size = 0xE0000000;
		mc->mc_vram_size = 0xE0000000;
	}
	if (rdev->flags & RADEON_IS_AGP) {
		size_bf = mc->gtt_start;
		size_af = 0xFFFFFFFF - mc->gtt_end + 1;
		if (size_bf > size_af) {
			if (mc->mc_vram_size > size_bf) {
				dev_warn(rdev->dev, "limiting VRAM\n");
				mc->real_vram_size = size_bf;
				mc->mc_vram_size = size_bf;
			}
			mc->vram_start = mc->gtt_start - mc->mc_vram_size;
		} else {
			if (mc->mc_vram_size > size_af) {
				dev_warn(rdev->dev, "limiting VRAM\n");
				mc->real_vram_size = size_af;
				mc->mc_vram_size = size_af;
			}
			mc->vram_start = mc->gtt_end;
		}
		mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
		dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
				mc->mc_vram_size >> 20, mc->vram_start,
				mc->vram_end, mc->real_vram_size >> 20);
	} else {
		u64 base = 0;
		if (rdev->flags & RADEON_IS_IGP)
			base = (RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24;
		radeon_vram_location(rdev, &rdev->mc, base);
		radeon_gtt_location(rdev, mc);
	}
}

int r600_mc_init(struct radeon_device *rdev)
{
	fixed20_12 a;
	u32 tmp;
	int chansize, numchan;

	/* Get VRAM information */
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32(RAMCFG);
	if (tmp & CHANSIZE_OVERRIDE) {
		chansize = 16;
	} else if (tmp & CHANSIZE_MASK) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	tmp = RREG32(CHMAP);
	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	}
	rdev->mc.vram_width = numchan * chansize;
	/* Could aperture size report 0 ? */
	rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
	rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
	/* Setup GPU memory space */
	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.visible_vram_size = rdev->mc.aper_size;
	/* FIXME remove this once we support unmappable VRAM */
	if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
		rdev->mc.mc_vram_size = rdev->mc.aper_size;
		rdev->mc.real_vram_size = rdev->mc.aper_size;
	}
	r600_vram_gtt_location(rdev, &rdev->mc);
	/* FIXME: we should enforce default clock in case GPU is not in
	 * default setup
	 */
	a.full = rfixed_const(100);
	rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
	rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
	if (rdev->flags & RADEON_IS_IGP)
		rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
	return 0;
}

/* We don't check whether the GPU really needs a reset; we simply do the
 * reset and leave it to the caller to determine if the GPU needs one. We
 * might add a helper function to check that.
 */
int r600_gpu_soft_reset(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) |
				S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) |
				S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) |
				S_008010_SH_BUSY(1) | S_008010_SPI03_BUSY(1) |
				S_008010_SMX_BUSY(1) | S_008010_SC_BUSY(1) |
				S_008010_PA_BUSY(1) | S_008010_DB03_BUSY(1) |
				S_008010_CR_BUSY(1) | S_008010_CB03_BUSY(1) |
				S_008010_GUI_ACTIVE(1);
	u32 grbm2_busy_mask = S_008014_SPI0_BUSY(1) | S_008014_SPI1_BUSY(1) |
			S_008014_SPI2_BUSY(1) | S_008014_SPI3_BUSY(1) |
			S_008014_TA0_BUSY(1) | S_008014_TA1_BUSY(1) |
			S_008014_TA2_BUSY(1) | S_008014_TA3_BUSY(1) |
			S_008014_DB0_BUSY(1) | S_008014_DB1_BUSY(1) |
			S_008014_DB2_BUSY(1) | S_008014_DB3_BUSY(1) |
			S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) |
			S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
	u32 srbm_reset = 0;
	u32 tmp;

	dev_info(rdev->dev, "GPU softreset\n");
	dev_info(rdev->dev, "  R_008010_GRBM_STATUS=0x%08X\n",
		RREG32(R_008010_GRBM_STATUS));
	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2=0x%08X\n",
		RREG32(R_008014_GRBM_STATUS2));
	dev_info(rdev->dev, "  R_000E50_SRBM_STATUS=0x%08X\n",
		RREG32(R_000E50_SRBM_STATUS));
	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Disable CP parsing/prefetching */
	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(0xff));
	/* Check if any of the rendering blocks is busy and reset it */
	if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
	    (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
		tmp = S_008020_SOFT_RESET_CR(1) |
			S_008020_SOFT_RESET_DB(1) |
			S_008020_SOFT_RESET_CB(1) |
			S_008020_SOFT_RESET_PA(1) |
			S_008020_SOFT_RESET_SC(1) |
			S_008020_SOFT_RESET_SMX(1) |
			S_008020_SOFT_RESET_SPI(1) |
			S_008020_SOFT_RESET_SX(1) |
			S_008020_SOFT_RESET_SH(1) |
			S_008020_SOFT_RESET_TC(1) |
			S_008020_SOFT_RESET_TA(1) |
			S_008020_SOFT_RESET_VC(1) |
			S_008020_SOFT_RESET_VGT(1);
		dev_info(rdev->dev, "  R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(R_008020_GRBM_SOFT_RESET, tmp);
		(void)RREG32(R_008020_GRBM_SOFT_RESET);
		udelay(50);
		WREG32(R_008020_GRBM_SOFT_RESET, 0);
		(void)RREG32(R_008020_GRBM_SOFT_RESET);
	}
	/* Reset CP (we always reset CP) */
	tmp = S_008020_SOFT_RESET_CP(1);
	dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
	WREG32(R_008020_GRBM_SOFT_RESET, tmp);
	(void)RREG32(R_008020_GRBM_SOFT_RESET);
	udelay(50);
	WREG32(R_008020_GRBM_SOFT_RESET, 0);
	(void)RREG32(R_008020_GRBM_SOFT_RESET);
	/* Reset other GPU blocks if necessary */
	if (G_000E50_RLC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_RLC(1);
	if (G_000E50_GRBM_RQ_PENDING(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_GRBM(1);
	if (G_000E50_HI_RQ_PENDING(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_IH(1);
	if (G_000E50_VMC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_VMC(1);
	if (G_000E50_MCB_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_MC(1);
	if (G_000E50_MCDZ_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_MC(1);
	if (G_000E50_MCDY_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_MC(1);
	if (G_000E50_MCDX_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_MC(1);
	if (G_000E50_MCDW_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_MC(1);
	if (G_000E50_RLC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_RLC(1);
	if (G_000E50_SEM_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_SEM(1);
	if (G_000E50_BIF_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_BIF(1);
	dev_info(rdev->dev, "  R_000E60_SRBM_SOFT_RESET=0x%08X\n", srbm_reset);
	WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset);
	(void)RREG32(R_000E60_SRBM_SOFT_RESET);
	udelay(50);
	WREG32(R_000E60_SRBM_SOFT_RESET, 0);
	(void)RREG32(R_000E60_SRBM_SOFT_RESET);
	WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset);
	(void)RREG32(R_000E60_SRBM_SOFT_RESET);
	udelay(50);
	WREG32(R_000E60_SRBM_SOFT_RESET, 0);
	(void)RREG32(R_000E60_SRBM_SOFT_RESET);
	/* Wait a little for things to settle down */
	udelay(50);
	dev_info(rdev->dev, "  R_008010_GRBM_STATUS=0x%08X\n",
		RREG32(R_008010_GRBM_STATUS));
	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2=0x%08X\n",
		RREG32(R_008014_GRBM_STATUS2));
	dev_info(rdev->dev, "  R_000E50_SRBM_STATUS=0x%08X\n",
		RREG32(R_000E50_SRBM_STATUS));
	/* After reset we need to reinit the asic as the GPU often ends up in
	 * an incoherent state.
	 */
	atom_asic_init(rdev->mode_info.atom_context);
	rv515_mc_resume(rdev, &save);
	return 0;
}

int r600_gpu_reset(struct radeon_device *rdev)
{
	return r600_gpu_soft_reset(rdev);
}
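
/* Derived from the code below rather than from documentation:
 * r600_get_tile_pipe_to_backend_map() walks the tile pipes in a swizzled
 * order and assigns each one round-robin to a render backend that survived
 * the disable mask, packing a 2-bit backend id per pipe into the value later
 * fed to BACKEND_MAP() in r600_gpu_init(). */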

static u32 r600_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
					     u32 num_backends,
					     u32 backend_disable_mask)
{
	u32 backend_map = 0;
	u32 enabled_backends_mask;
	u32 enabled_backends_count;
	u32 cur_pipe;
	u32 swizzle_pipe[R6XX_MAX_PIPES];
	u32 cur_backend;
	u32 i;

	if (num_tile_pipes > R6XX_MAX_PIPES)
		num_tile_pipes = R6XX_MAX_PIPES;
	if (num_tile_pipes < 1)
		num_tile_pipes = 1;
	if (num_backends > R6XX_MAX_BACKENDS)
		num_backends = R6XX_MAX_BACKENDS;
	if (num_backends < 1)
		num_backends = 1;

	enabled_backends_mask = 0;
	enabled_backends_count = 0;
	for (i = 0; i < R6XX_MAX_BACKENDS; ++i) {
		if (((backend_disable_mask >> i) & 1) == 0) {
			enabled_backends_mask |= (1 << i);
			++enabled_backends_count;
		}
		if (enabled_backends_count == num_backends)
			break;
	}

	if (enabled_backends_count == 0) {
		enabled_backends_mask = 1;
		enabled_backends_count = 1;
	}

	if (enabled_backends_count != num_backends)
		num_backends = enabled_backends_count;

	memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R6XX_MAX_PIPES);
	switch (num_tile_pipes) {
	case 1:
		swizzle_pipe[0] = 0;
		break;
	case 2:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		break;
	case 3:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		swizzle_pipe[2] = 2;
		break;
	case 4:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		swizzle_pipe[2] = 2;
		swizzle_pipe[3] = 3;
		break;
	case 5:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		swizzle_pipe[2] = 2;
		swizzle_pipe[3] = 3;
		swizzle_pipe[4] = 4;
		break;
	case 6:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 2;
		swizzle_pipe[2] = 4;
		swizzle_pipe[3] = 5;
		swizzle_pipe[4] = 1;
		swizzle_pipe[5] = 3;
		break;
	case 7:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 2;
		swizzle_pipe[2] = 4;
		swizzle_pipe[3] = 6;
		swizzle_pipe[4] = 1;
		swizzle_pipe[5] = 3;
		swizzle_pipe[6] = 5;
		break;
	case 8:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 2;
		swizzle_pipe[2] = 4;
		swizzle_pipe[3] = 6;
		swizzle_pipe[4] = 1;
		swizzle_pipe[5] = 3;
		swizzle_pipe[6] = 5;
		swizzle_pipe[7] = 7;
		break;
	}

	cur_backend = 0;
	for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
		while (((1 << cur_backend) & enabled_backends_mask) == 0)
			cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;

		backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2)));

		cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
	}

	return backend_map;
}
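
/* r600_count_pipe_bits() below is a plain 32-bit population count (an
 * hweight32() equivalent), used to derive enabled pipe/backend counts from
 * the hardware disable masks. */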

int r600_count_pipe_bits(uint32_t val)
{
	int i, ret = 0;

	for (i = 0; i < 32; i++) {
		ret += val & 1;
		val >>= 1;
	}
	return ret;
}

void r600_gpu_init(struct radeon_device *rdev)
{
	u32 tiling_config;
	u32 ramcfg;
	u32 backend_map;
	u32 cc_rb_backend_disable;
	u32 cc_gc_shader_pipe_config;
	u32 tmp;
	int i, j;
	u32 sq_config;
	u32 sq_gpr_resource_mgmt_1 = 0;
	u32 sq_gpr_resource_mgmt_2 = 0;
	u32 sq_thread_resource_mgmt = 0;
	u32 sq_stack_resource_mgmt_1 = 0;
	u32 sq_stack_resource_mgmt_2 = 0;

	/* FIXME: implement */
	switch (rdev->family) {
	case CHIP_R600:
		rdev->config.r600.max_pipes = 4;
		rdev->config.r600.max_tile_pipes = 8;
		rdev->config.r600.max_simds = 4;
		rdev->config.r600.max_backends = 4;
		rdev->config.r600.max_gprs = 256;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 256;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 16;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	case CHIP_RV630:
	case CHIP_RV635:
		rdev->config.r600.max_pipes = 2;
		rdev->config.r600.max_tile_pipes = 2;
		rdev->config.r600.max_simds = 3;
		rdev->config.r600.max_backends = 1;
		rdev->config.r600.max_gprs = 128;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 128;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 4;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		rdev->config.r600.max_pipes = 1;
		rdev->config.r600.max_tile_pipes = 1;
		rdev->config.r600.max_simds = 2;
		rdev->config.r600.max_backends = 1;
		rdev->config.r600.max_gprs = 128;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 128;
		rdev->config.r600.max_hw_contexts = 4;
		rdev->config.r600.max_gs_threads = 4;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 1;
		break;
	case CHIP_RV670:
		rdev->config.r600.max_pipes = 4;
		rdev->config.r600.max_tile_pipes = 4;
		rdev->config.r600.max_simds = 4;
		rdev->config.r600.max_backends = 4;
		rdev->config.r600.max_gprs = 192;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 256;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 16;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	default:
		break;
	}

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}

	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));

	/* Setup tiling */
	tiling_config = 0;
	ramcfg = RREG32(RAMCFG);
	switch (rdev->config.r600.max_tile_pipes) {
	case 1:
		tiling_config |= PIPE_TILING(0);
		break;
	case 2:
		tiling_config |= PIPE_TILING(1);
		break;
	case 4:
		tiling_config |= PIPE_TILING(2);
		break;
	case 8:
		tiling_config |= PIPE_TILING(3);
		break;
	default:
		break;
	}
	rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes;
	rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
	tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
	tiling_config |= GROUP_SIZE(0);
	rdev->config.r600.tiling_group_size = 256;
	tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
	if (tmp > 3) {
		tiling_config |= ROW_TILING(3);
		tiling_config |= SAMPLE_SPLIT(3);
	} else {
		tiling_config |= ROW_TILING(tmp);
		tiling_config |= SAMPLE_SPLIT(tmp);
	}
	tiling_config |= BANK_SWAPS(1);

	cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
	cc_rb_backend_disable |=
		BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK);

	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
	cc_gc_shader_pipe_config |=
		INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK);
	cc_gc_shader_pipe_config |=
		INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK);

	backend_map = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes,
							(R6XX_MAX_BACKENDS -
							 r600_count_pipe_bits((cc_rb_backend_disable &
									       R6XX_MAX_BACKENDS_MASK) >> 16)),
							(cc_rb_backend_disable >> 16));

	tiling_config |= BACKEND_MAP(backend_map);
	WREG32(GB_TILING_CONFIG, tiling_config);
	WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
	WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);

	/* Setup pipes */
	WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
	WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);

	tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
	WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
	WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);

	/* Setup some CP states */
	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | ROQ_IB2_START(0x2b)));
	WREG32(CP_MEQ_THRESHOLDS, (MEQ_END(0x40) | ROQ_END(0x40)));

	WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | SYNC_GRADIENT |
			     SYNC_WALKER | SYNC_ALIGNER));
	/* Setup various GPU states */
	if (rdev->family == CHIP_RV670)
		WREG32(ARB_GDEC_RD_CNTL, 0x00000021);

	tmp = RREG32(SX_DEBUG_1);
	tmp |= SMX_EVENT_RELEASE;
	if ((rdev->family > CHIP_R600))
		tmp |= ENABLE_NEW_SMX_ADDRESS;
	WREG32(SX_DEBUG_1, tmp);

	if (((rdev->family) == CHIP_R600) ||
	    ((rdev->family) == CHIP_RV630) ||
	    ((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780) ||
	    ((rdev->family) == CHIP_RS880)) {
		WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE);
	} else {
		WREG32(DB_DEBUG, 0);
	}
	WREG32(DB_WATERMARKS, (DEPTH_FREE(4) | DEPTH_CACHELINE_FREE(16) |
			       DEPTH_FLUSH(16) | DEPTH_PENDING_FREE(4)));

	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
	WREG32(VGT_NUM_INSTANCES, 0);

	WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(0));

	tmp = RREG32(SQ_MS_FIFO_SIZES);
	if (((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780) ||
	    ((rdev->family) == CHIP_RS880)) {
		tmp = (CACHE_FIFO_SIZE(0xa) |
		       FETCH_FIFO_HIWATER(0xa) |
		       DONE_FIFO_HIWATER(0xe0) |
		       ALU_UPDATE_FIFO_HIWATER(0x8));
	} else if (((rdev->family) == CHIP_R600) ||
		   ((rdev->family) == CHIP_RV630)) {
		tmp &= ~DONE_FIFO_HIWATER(0xff);
		tmp |= DONE_FIFO_HIWATER(0x4);
	}
	WREG32(SQ_MS_FIFO_SIZES, tmp);

	/* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
	 * should be adjusted as needed by the 2D/3D drivers.  This just sets default values
	 */
	sq_config = RREG32(SQ_CONFIG);
	sq_config &= ~(PS_PRIO(3) |
		       VS_PRIO(3) |
		       GS_PRIO(3) |
		       ES_PRIO(3));
	sq_config |= (DX9_CONSTS |
		      VC_ENABLE |
		      PS_PRIO(0) |
		      VS_PRIO(1) |
		      GS_PRIO(2) |
		      ES_PRIO(3));

	if ((rdev->family) == CHIP_R600) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(124) |
					  NUM_VS_GPRS(124) |
					  NUM_CLAUSE_TEMP_GPRS(4));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(0) |
					  NUM_ES_GPRS(0));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(136) |
					   NUM_VS_THREADS(48) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(4));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(128) |
					    NUM_VS_STACK_ENTRIES(128));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(0) |
					    NUM_ES_STACK_ENTRIES(0));
	} else if (((rdev->family) == CHIP_RV610) ||
		   ((rdev->family) == CHIP_RV620) ||
		   ((rdev->family) == CHIP_RS780) ||
		   ((rdev->family) == CHIP_RS880)) {
		/* no vertex cache */
		sq_config &= ~VC_ENABLE;

		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
					  NUM_ES_GPRS(17));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
					    NUM_VS_STACK_ENTRIES(40));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
					    NUM_ES_STACK_ENTRIES(16));
	} else if (((rdev->family) == CHIP_RV630) ||
		   ((rdev->family) == CHIP_RV635)) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(18) |
					  NUM_ES_GPRS(18));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
					    NUM_VS_STACK_ENTRIES(40));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
					    NUM_ES_STACK_ENTRIES(16));
	} else if ((rdev->family) == CHIP_RV670) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
					  NUM_ES_GPRS(17));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(64) |
					    NUM_VS_STACK_ENTRIES(64));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(64) |
					    NUM_ES_STACK_ENTRIES(64));
	}

	WREG32(SQ_CONFIG, sq_config);
	WREG32(SQ_GPR_RESOURCE_MGMT_1,  sq_gpr_resource_mgmt_1);
	WREG32(SQ_GPR_RESOURCE_MGMT_2,  sq_gpr_resource_mgmt_2);
	WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
	WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
	WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);

	if (((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780) ||
	    ((rdev->family) == CHIP_RS880)) {
		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY));
	} else {
		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC));
	}

	/* More default values. 2D/3D driver should adjust as needed */
	WREG32(PA_SC_AA_SAMPLE_LOCS_2S, (S0_X(0xc) | S0_Y(0x4) |
					 S1_X(0x4) | S1_Y(0xc)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_4S, (S0_X(0xe) | S0_Y(0xe) |
					 S1_X(0x2) | S1_Y(0x2) |
					 S2_X(0xa) | S2_Y(0x6) |
					 S3_X(0x6) | S3_Y(0xa)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD0, (S0_X(0xe) | S0_Y(0xb) |
					     S1_X(0x4) | S1_Y(0xc) |
					     S2_X(0x1) | S2_Y(0x6) |
					     S3_X(0xa) | S3_Y(0xe)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD1, (S4_X(0x6) | S4_Y(0x1) |
					     S5_X(0x0) | S5_Y(0x0) |
					     S6_X(0xb) | S6_Y(0x4) |
					     S7_X(0x7) | S7_Y(0x8)));

	WREG32(VGT_STRMOUT_EN, 0);
	tmp = rdev->config.r600.max_pipes * 16;
	switch (rdev->family) {
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		tmp += 32;
		break;
	case CHIP_RV670:
		tmp += 128;
		break;
	default:
		break;
	}
	if (tmp > 256) {
		tmp = 256;
	}
	WREG32(VGT_ES_PER_GS, 128);
	WREG32(VGT_GS_PER_ES, tmp);
	WREG32(VGT_GS_PER_VS, 2);
	WREG32(VGT_GS_VERTEX_REUSE, 16);

	/* more default values. 2D/3D driver should adjust as needed */
	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
	WREG32(VGT_STRMOUT_EN, 0);
	WREG32(SX_MISC, 0);
	WREG32(PA_SC_MODE_CNTL, 0);
	WREG32(PA_SC_AA_CONFIG, 0);
	WREG32(PA_SC_LINE_STIPPLE, 0);
	WREG32(SPI_INPUT_Z, 0);
	WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
	WREG32(CB_COLOR7_FRAG, 0);

	/* Clear render buffer base addresses */
	WREG32(CB_COLOR0_BASE, 0);
	WREG32(CB_COLOR1_BASE, 0);
	WREG32(CB_COLOR2_BASE, 0);
	WREG32(CB_COLOR3_BASE, 0);
	WREG32(CB_COLOR4_BASE, 0);
	WREG32(CB_COLOR5_BASE, 0);
	WREG32(CB_COLOR6_BASE, 0);
	WREG32(CB_COLOR7_BASE, 0);
	WREG32(CB_COLOR7_FRAG, 0);

	switch (rdev->family) {
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		tmp = TC_L2_SIZE(8);
		break;
	case CHIP_RV630:
	case CHIP_RV635:
		tmp = TC_L2_SIZE(4);
		break;
	case CHIP_R600:
		tmp = TC_L2_SIZE(0) | L2_DISABLE_LATE_HIT;
		break;
	default:
		tmp = TC_L2_SIZE(0);
		break;
	}
	WREG32(TC_CNTL, tmp);

	tmp = RREG32(HDP_HOST_PATH_CNTL);
	WREG32(HDP_HOST_PATH_CNTL, tmp);

	tmp = RREG32(ARB_POP);
	tmp |= ENABLE_TC128;
	WREG32(ARB_POP, tmp);

	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
	WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
			       NUM_CLIP_SEQ(3)));
	WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095));
}

/*
 * Indirect register accessors
 */
u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
{
	u32 r;

	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
	(void)RREG32(PCIE_PORT_INDEX);
	r = RREG32(PCIE_PORT_DATA);
	return r;
}

void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
	(void)RREG32(PCIE_PORT_INDEX);
	WREG32(PCIE_PORT_DATA, (v));
	(void)RREG32(PCIE_PORT_DATA);
}

/*
 * CP & Ring
 */
void r600_cp_stop(struct radeon_device *rdev)
{
	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
}

int r600_init_microcode(struct radeon_device *rdev)
{
	struct platform_device *pdev;
	const char *chip_name;
	const char *rlc_chip_name;
	size_t pfp_req_size, me_req_size, rlc_req_size;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
	err = IS_ERR(pdev);
	if (err) {
		printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
		return -EINVAL;
	}

	switch (rdev->family) {
	case CHIP_R600:
		chip_name = "R600";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV610:
		chip_name = "RV610";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV630:
		chip_name = "RV630";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV620:
		chip_name = "RV620";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV635:
		chip_name = "RV635";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV670:
		chip_name = "RV670";
		rlc_chip_name = "R600";
		break;
	case CHIP_RS780:
	case CHIP_RS880:
		chip_name = "RS780";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV770:
		chip_name = "RV770";
		rlc_chip_name = "R700";
		break;
	case CHIP_RV730:
	case CHIP_RV740:
		chip_name = "RV730";
		rlc_chip_name = "R700";
		break;
	case CHIP_RV710:
		chip_name = "RV710";
		rlc_chip_name = "R700";
		break;
	default: BUG();
	}

	if (rdev->family >= CHIP_RV770) {
		pfp_req_size = R700_PFP_UCODE_SIZE * 4;
		me_req_size = R700_PM4_UCODE_SIZE * 4;
		rlc_req_size = R700_RLC_UCODE_SIZE * 4;
	} else {
		pfp_req_size = PFP_UCODE_SIZE * 4;
		me_req_size = PM4_UCODE_SIZE * 12;
		rlc_req_size = RLC_UCODE_SIZE * 4;
	}

	DRM_INFO("Loading %s Microcode\n", chip_name);

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
	err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->pfp_fw->size != pfp_req_size) {
		printk(KERN_ERR
		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->pfp_fw->size, fw_name);
		err = -EINVAL;
		goto out;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
	err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->me_fw->size != me_req_size) {
		printk(KERN_ERR
		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->me_fw->size, fw_name);
		err = -EINVAL;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
	err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->rlc_fw->size != rlc_req_size) {
		printk(KERN_ERR
		       "r600_rlc: Bogus length %zu in firmware \"%s\"\n",
		       rdev->rlc_fw->size, fw_name);
		err = -EINVAL;
	}

out:
	platform_device_unregister(pdev);

	if (err) {
		if (err != -EINVAL)
			printk(KERN_ERR
			       "r600_cp: Failed to load firmware \"%s\"\n",
			       fw_name);
		release_firmware(rdev->pfp_fw);
		rdev->pfp_fw = NULL;
		release_firmware(rdev->me_fw);
		rdev->me_fw = NULL;
		release_firmware(rdev->rlc_fw);
		rdev->rlc_fw = NULL;
	}
	return err;
}

static int r600_cp_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i;

	if (!rdev->me_fw || !rdev->pfp_fw)
		return -EINVAL;

	r600_cp_stop(rdev);

	WREG32(CP_RB_CNTL, RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));

	/* Reset cp */
	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);

	WREG32(CP_ME_RAM_WADDR, 0);

	fw_data = (const __be32 *)rdev->me_fw->data;
	WREG32(CP_ME_RAM_WADDR, 0);
	for (i = 0; i < PM4_UCODE_SIZE * 3; i++)
		WREG32(CP_ME_RAM_DATA,
		       be32_to_cpup(fw_data++));

	fw_data = (const __be32 *)rdev->pfp_fw->data;
	WREG32(CP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < PFP_UCODE_SIZE; i++)
		WREG32(CP_PFP_UCODE_DATA,
		       be32_to_cpup(fw_data++));

	WREG32(CP_PFP_UCODE_ADDR, 0);
	WREG32(CP_ME_RAM_WADDR, 0);
	WREG32(CP_ME_RAM_RADDR, 0);
	return 0;
}
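
/* r600_cp_start() below un-halts the micro engine and submits the initial
 * ME_INITIALIZE packet; note the r6xx/r7xx split over where max_hw_contexts
 * lives (rdev->config.r600 vs. rdev->config.rv770). */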
1575
 
1221 serge 1576
int r600_cp_start(struct radeon_device *rdev)
1577
{
1578
	int r;
1579
	uint32_t cp_me;
1580
 
1581
	r = radeon_ring_lock(rdev, 7);
1582
	if (r) {
1583
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
1584
		return r;
1585
	}
1586
	radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
1587
	radeon_ring_write(rdev, 0x1);
1588
	if (rdev->family < CHIP_RV770) {
1589
		radeon_ring_write(rdev, 0x3);
1590
		radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
1591
	} else {
1592
		radeon_ring_write(rdev, 0x0);
1593
		radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
1594
	}
1595
	radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
1596
	radeon_ring_write(rdev, 0);
1597
	radeon_ring_write(rdev, 0);
1598
	radeon_ring_unlock_commit(rdev);
1599
 
1600
	cp_me = 0xff;
1601
	WREG32(R_0086D8_CP_ME_CNTL, cp_me);
1602
	return 0;
1603
}

int r600_cp_resume(struct radeon_device *rdev)
{
	u32 tmp;
	u32 rb_bufsz;
	int r;

	/* Reset cp */
	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);

	/* Set ring buffer size */
	rb_bufsz = drm_order(rdev->cp.ring_size / 8);
	tmp = RB_NO_UPDATE | (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB_CNTL, tmp);
	WREG32(CP_SEM_WAIT_TIMER, 0x4);

	/* Set the write pointer delay */
	WREG32(CP_RB_WPTR_DELAY, 0);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
	WREG32(CP_RB_RPTR_WR, 0);
	WREG32(CP_RB_WPTR, 0);
	WREG32(CP_RB_RPTR_ADDR, rdev->cp.gpu_addr & 0xFFFFFFFF);
	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->cp.gpu_addr));
	mdelay(1);
	WREG32(CP_RB_CNTL, tmp);

	WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));

	rdev->cp.rptr = RREG32(CP_RB_RPTR);
	rdev->cp.wptr = RREG32(CP_RB_WPTR);

	r600_cp_start(rdev);
	rdev->cp.ready = true;
	r = radeon_ring_test(rdev);
	if (r) {
		rdev->cp.ready = false;
		return r;
	}
	return 0;
}

void r600_cp_commit(struct radeon_device *rdev)
{
	WREG32(CP_RB_WPTR, rdev->cp.wptr);
	/* posting read: make sure the wptr write reached the device */
	(void)RREG32(CP_RB_WPTR);
}

void r600_ring_init(struct radeon_device *rdev, unsigned ring_size)
{
	u32 rb_bufsz;

	/* Align ring size */
	rb_bufsz = drm_order(ring_size / 8);
	ring_size = (1 << (rb_bufsz + 1)) * 4;
	rdev->cp.ring_size = ring_size;
	rdev->cp.align_mask = 16 - 1;
}
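
/*
 * Worked example of the rounding above: drm_order() returns log2 rounded up,
 * so for the 1 MiB ring requested from r600_init() below:
 *
 *	rb_bufsz  = drm_order(1048576 / 8)	= 17
 *	ring_size = (1 << (17 + 1)) * 4		= 1048576
 *
 * A power-of-two request passes through unchanged; any other size is
 * rounded up to the next power of two.
 */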

/*
 * GPU scratch registers helper functions.
 */
void r600_scratch_init(struct radeon_device *rdev)
{
	int i;

	rdev->scratch.num_reg = 7;
	for (i = 0; i < rdev->scratch.num_reg; i++) {
		rdev->scratch.free[i] = true;
		rdev->scratch.reg[i] = SCRATCH_REG0 + (i * 4);
	}
}
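
/*
 * Typical scratch register life cycle, sketched (r600_ring_test() below is
 * the real in-tree user of this pattern):
 *
 *	uint32_t reg;
 *
 *	if (radeon_scratch_get(rdev, &reg) == 0) {
 *		WREG32(reg, 0xCAFEDEAD);	/. host writes, GPU may overwrite ./
 *		/. ... have the CP write a new value, poll for it ... ./
 *		radeon_scratch_free(rdev, reg);
 *	}
 */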

int r600_ring_test(struct radeon_device *rdev)
{
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = radeon_scratch_get(rdev, &scratch);
	if (r) {
		DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	r = radeon_ring_lock(rdev, 3);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		radeon_scratch_free(rdev, scratch);
		return r;
	}
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(rdev, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
	radeon_ring_write(rdev, 0xDEADBEEF);
	radeon_ring_unlock_commit(rdev);
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ring test succeeded in %d usecs\n", i);
	} else {
		DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}
	radeon_scratch_free(rdev, scratch);
	return r;
}

void r600_fence_ring_emit(struct radeon_device *rdev,
			  struct radeon_fence *fence)
{
	/* Also consider EVENT_WRITE_EOP.  It handles the interrupts + timestamps + events */

	radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
	radeon_ring_write(rdev, CACHE_FLUSH_AND_INV_EVENT);
	/* wait for 3D idle clean */
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
	/* Emit fence sequence & fire IRQ */
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
	radeon_ring_write(rdev, fence->seq);
	/* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
	radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
	radeon_ring_write(rdev, RB_INT_STAT);
}
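
/*
 * The sequence emitted above lands in the fence driver's scratch register,
 * so completion can be detected host-side with a plain register read.
 * Roughly (a sketch; the exact comparison lives in the fence driver):
 *
 *	u32 seq = RREG32(rdev->fence_drv.scratch_reg);
 *	bool signaled = (seq >= fence->seq);
 */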

int r600_set_surface_reg(struct radeon_device *rdev, int reg,
			 uint32_t tiling_flags, uint32_t pitch,
			 uint32_t offset, uint32_t obj_size)
{
	/* FIXME: implement */
	return 0;
}

void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
{
	/* FIXME: implement */
}


bool r600_card_posted(struct radeon_device *rdev)
{
	uint32_t reg;

	/* first check CRTCs */
	reg = RREG32(D1CRTC_CONTROL) |
		RREG32(D2CRTC_CONTROL);
	if (reg & CRTC_EN)
		return true;

	/* then check MEM_SIZE, in case the crtcs are off */
	if (RREG32(CONFIG_MEMSIZE))
		return true;

	return false;
}

int r600_startup(struct radeon_device *rdev)
{
	int r;

	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
		r = r600_init_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load firmware!\n");
			return r;
		}
	}

	r600_mc_program(rdev);
	if (rdev->flags & RADEON_IS_AGP) {
		r600_agp_enable(rdev);
	} else {
		r = r600_pcie_gart_enable(rdev);
		if (r)
			return r;
	}
	r600_gpu_init(rdev);

	r = radeon_ring_init(rdev, rdev->cp.ring_size);
	if (r)
		return r;
	r = r600_cp_load_microcode(rdev);
	if (r)
		return r;
	r = r600_cp_resume(rdev);
	if (r)
		return r;
	/* write back buffers are not vital so don't worry about failure */
//	r600_wb_enable(rdev);
	return 0;
}

void r600_vga_set_state(struct radeon_device *rdev, bool state)
{
	uint32_t temp;

	temp = RREG32(CONFIG_CNTL);
	if (state == false) {
		temp &= ~(1<<0);
		temp |= (1<<1);
	} else {
		temp &= ~(1<<1);
	}
	WREG32(CONFIG_CNTL, temp);
}


/* The plan is to move initialization into this function and use
 * helper functions so that radeon_device_init does little more
 * than call the asic specific functions. This should also allow
 * us to remove a bunch of callback functions like vram_info.
 */
int r600_init(struct radeon_device *rdev)
{
	int r;

	r = radeon_dummy_page_init(rdev);
	if (r)
		return r;
	if (r600_debugfs_mc_info_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for mc!\n");
	}
	/* This doesn't do much */
	r = radeon_gem_init(rdev);
	if (r)
		return r;
	/* Read BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	/* Must be an ATOMBIOS */
	if (!rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
		return -EINVAL;
	}
	r = radeon_atombios_init(rdev);
	if (r)
		return r;
	/* Post card if necessary */
	if (!r600_card_posted(rdev)) {
		if (!rdev->bios) {
			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
			return -EINVAL;
		}
		DRM_INFO("GPU not posted. posting now...\n");
		atom_asic_init(rdev->mode_info.atom_context);
	}
	/* Initialize scratch registers */
	r600_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	r = radeon_clocks_init(rdev);
	if (r)
		return r;
	/* Initialize power management */
	radeon_pm_init(rdev);
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r)
			radeon_agp_disable(rdev);
	}
	r = r600_mc_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;

//	r = radeon_irq_kms_init(rdev);
//	if (r)
//		return r;

	rdev->cp.ring_obj = NULL;
	r600_ring_init(rdev, 1024 * 1024);

//	rdev->ih.ring_obj = NULL;
//	r600_ih_ring_init(rdev, 64 * 1024);

	r = r600_pcie_gart_init(rdev);
	if (r)
		return r;

	rdev->accel_working = true;
	r = r600_startup(rdev);
	if (r) {
		dev_err(rdev->dev, "disabling GPU acceleration\n");
//		r600_suspend(rdev);
//		r600_wb_fini(rdev);
//		radeon_ring_fini(rdev);
		r600_pcie_gart_fini(rdev);
		rdev->accel_working = false;
	}
	if (rdev->accel_working) {
//		r = radeon_ib_pool_init(rdev);
//		if (r) {
//			DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
//			rdev->accel_working = false;
//		}
//		r = r600_ib_test(rdev);
//		if (r) {
//			DRM_ERROR("radeon: failed testing IB (%d).\n", r);
//			rdev->accel_working = false;
//		}
	}
	return 0;
}


/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int r600_debugfs_cp_ring_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	unsigned count, i, j;

	radeon_ring_free_size(rdev);
	count = (rdev->cp.ring_size / 4) - rdev->cp.ring_free_dw;
	seq_printf(m, "CP_STAT 0x%08x\n", RREG32(CP_STAT));
	seq_printf(m, "CP_RB_WPTR 0x%08x\n", RREG32(CP_RB_WPTR));
	seq_printf(m, "CP_RB_RPTR 0x%08x\n", RREG32(CP_RB_RPTR));
	seq_printf(m, "driver's copy of the CP_RB_WPTR 0x%08x\n", rdev->cp.wptr);
	seq_printf(m, "driver's copy of the CP_RB_RPTR 0x%08x\n", rdev->cp.rptr);
	seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
	seq_printf(m, "%u dwords in ring\n", count);
	i = rdev->cp.rptr;
	for (j = 0; j <= count; j++) {
		seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
		i = (i + 1) & rdev->cp.ptr_mask;
	}
	return 0;
}

static int r600_debugfs_mc_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	DREG32_SYS(m, rdev, R_000E50_SRBM_STATUS);
	DREG32_SYS(m, rdev, VM_L2_STATUS);
	return 0;
}

static struct drm_info_list r600_mc_info_list[] = {
	{"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
	{"r600_ring_info", r600_debugfs_cp_ring_info, 0, NULL},
};
#endif

int r600_debugfs_mc_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, r600_mc_info_list, ARRAY_SIZE(r600_mc_info_list));
#else
	return 0;
#endif
}
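
/*
 * With CONFIG_DEBUG_FS enabled on stock Linux, the two entries registered
 * above appear under the usual DRM debugfs directory, e.g.:
 *
 *	cat /sys/kernel/debug/dri/0/r600_ring_info
 *	cat /sys/kernel/debug/dri/0/r600_mc_info
 */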

/**
 * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl
 * rdev: radeon device structure
 * bo: buffer object struct which userspace is waiting for idle
 *
 * Some R6XX/R7XX chips don't seem to take the HDP flush performed through
 * the ring buffer into account, which leads to rendering corruption (see
 * http://bugzilla.kernel.org/show_bug.cgi?id=15186).  To avoid this we
 * perform the HDP flush directly, writing the register through MMIO.
 */
void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
{
	WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
}