/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/seq_file.h>
#include <linux/firmware.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon.h"
#include "radeon_mode.h"
#include "r600d.h"
#include "atom.h"
#include "avivod.h"

#define PFP_UCODE_SIZE 576
#define PM4_UCODE_SIZE 1792
#define R700_PFP_UCODE_SIZE 848
#define R700_PM4_UCODE_SIZE 1360
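/* The sizes above are assumed to be the microcode blob lengths in 32-bit
 * dwords: PFP is the CP prefetch parser, PM4 the micro engine (ME) that
 * executes PM4 packets; R700-class parts ship separate images.
 */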

/* Firmware Names */
MODULE_FIRMWARE("radeon/R600_pfp.bin");
MODULE_FIRMWARE("radeon/R600_me.bin");
MODULE_FIRMWARE("radeon/RV610_pfp.bin");
MODULE_FIRMWARE("radeon/RV610_me.bin");
MODULE_FIRMWARE("radeon/RV630_pfp.bin");
MODULE_FIRMWARE("radeon/RV630_me.bin");
MODULE_FIRMWARE("radeon/RV620_pfp.bin");
MODULE_FIRMWARE("radeon/RV620_me.bin");
MODULE_FIRMWARE("radeon/RV635_pfp.bin");
MODULE_FIRMWARE("radeon/RV635_me.bin");
MODULE_FIRMWARE("radeon/RV670_pfp.bin");
MODULE_FIRMWARE("radeon/RV670_me.bin");
MODULE_FIRMWARE("radeon/RS780_pfp.bin");
MODULE_FIRMWARE("radeon/RS780_me.bin");
MODULE_FIRMWARE("radeon/RV770_pfp.bin");
MODULE_FIRMWARE("radeon/RV770_me.bin");
MODULE_FIRMWARE("radeon/RV730_pfp.bin");
MODULE_FIRMWARE("radeon/RV730_me.bin");
MODULE_FIRMWARE("radeon/RV710_pfp.bin");
MODULE_FIRMWARE("radeon/RV710_me.bin");

int r600_debugfs_mc_info_init(struct radeon_device *rdev);

/* r600,rv610,rv630,rv620,rv635,rv670 */
int r600_mc_wait_for_idle(struct radeon_device *rdev);
void r600_gpu_init(struct radeon_device *rdev);
void r600_fini(struct radeon_device *rdev);

/*
 * R600 PCIE GART
 */
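/* The GART page table is an array of 64-bit entries in VRAM, one PTE per
 * GPU page, so entry i lives at byte offset i * 8 from the mapped table.
 */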
int r600_gart_clear_page(struct radeon_device *rdev, int i)
{
	void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
	u64 pte;

	if (i < 0 || i >= rdev->gart.num_gpu_pages)
		return -EINVAL;
	pte = 0;
	writeq(pte, ((void __iomem *)ptr) + (i * 8));
	return 0;
}

void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
	for (i = 0; i < rdev->usec_timeout; i++) {
		/* check the invalidation response */
		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
		if (tmp == 2) {
			printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
			return;
		}
		if (tmp) {
			return;
		}
		udelay(1);
	}
}

int r600_pcie_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.table.vram.robj) {
		WARN(1, "R600 PCIE GART already initialized.\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r)
		return r;
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
	return radeon_gart_table_vram_alloc(rdev);
}

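/* Sketch of the enable sequence below: pin the page table in VRAM, program
 * the L2 cache and L1 TLB controls, point VM context 0 at the GTT range and
 * the table base, then flush the TLB before marking the GART ready.
 */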
int r600_pcie_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r, i;

	if (rdev->gart.table.vram.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(rdev->dummy_page.addr >> 12));
	for (i = 1; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	r600_pcie_gart_tlb_flush(rdev);
	rdev->gart.ready = true;
	return 0;
}

void r600_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Disable all tables */
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	/* Disable L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup L1 TLB control */
	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	if (rdev->gart.table.vram.robj) {
//		radeon_object_kunmap(rdev->gart.table.vram.robj);
//		radeon_object_unpin(rdev->gart.table.vram.robj);
	}
}

void r600_pcie_gart_fini(struct radeon_device *rdev)
{
	r600_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
	radeon_gart_fini(rdev);
}

void r600_agp_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
}

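/* Poll SRBM_STATUS until the memory controller goes idle; the 0x3F00 mask
 * is assumed to cover the MC busy bits. Gives up after rdev->usec_timeout
 * microseconds and returns -1 so callers can warn about the timeout.
 */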
int r600_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read the MC busy bits from SRBM_STATUS */
		tmp = RREG32(R_000E50_SRBM_STATUS) & 0x3F00;
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -1;
}

static void r600_mc_program(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Lockout access through VGA aperture (doesn't exist before R600) */
	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	if (rdev->flags & RADEON_IS_AGP) {
		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
			/* VRAM before AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.vram_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.gtt_end >> 12);
		} else {
			/* VRAM after AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.gtt_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.vram_end >> 12);
		}
	} else {
		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
	}
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
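	/* MC_VM_FB_LOCATION packs the VRAM range at 16MB granularity:
	 * (end >> 24) in the upper 16 bits, (start >> 24) in the lower 16.
	 * For example, 256MB of VRAM at offset 0 has start = 0x00000000 and
	 * end = 0x0FFFFFFF, giving a register value of 0x000F0000.
	 */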
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
	WREG32(HDP_NONSURFACE_SIZE, rdev->mc.mc_vram_size | 0x3FF);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
	} else {
		WREG32(MC_VM_AGP_BASE, 0);
		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	}
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	rv515_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it overwriting our objects */
	rv515_vga_render_disable(rdev);
}

int r600_mc_init(struct radeon_device *rdev)
{
	fixed20_12 a;
	u32 tmp;
	int chansize, numchan;
	int r;

	/* Get VRAM information */
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32(RAMCFG);
	if (tmp & CHANSIZE_OVERRIDE) {
		chansize = 16;
	} else if (tmp & CHANSIZE_MASK) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	tmp = RREG32(CHMAP);
	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	}
	rdev->mc.vram_width = numchan * chansize;
	/* Could aperture size report 0 ? */
	rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
	rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
	/* Setup GPU memory space */
	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);

	if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
		rdev->mc.mc_vram_size = rdev->mc.aper_size;

	if (rdev->mc.real_vram_size > rdev->mc.aper_size)
		rdev->mc.real_vram_size = rdev->mc.aper_size;

	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r)
			return r;
		/* gtt_size is setup by radeon_agp_init */
		rdev->mc.gtt_location = rdev->mc.agp_base;
		tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size;
		/* Try to put vram before or after AGP because we
		 * want SYSTEM_APERTURE to cover both VRAM and
		 * AGP so that GPU can catch out of VRAM/AGP access
		 */
		if (rdev->mc.gtt_location > rdev->mc.mc_vram_size) {
			/* Enough room before */
			rdev->mc.vram_location = rdev->mc.gtt_location -
							rdev->mc.mc_vram_size;
		} else if (tmp > rdev->mc.mc_vram_size) {
			/* Enough room after */
			rdev->mc.vram_location = rdev->mc.gtt_location +
							rdev->mc.gtt_size;
		} else {
			/* Try to setup VRAM then AGP; this might not
			 * work on some cards
			 */
			rdev->mc.vram_location = 0x00000000UL;
			rdev->mc.gtt_location = rdev->mc.mc_vram_size;
		}
	} else {
		rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
		rdev->mc.vram_location = (RREG32(MC_VM_FB_LOCATION) &
							0xFFFF) << 24;
		tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size;
		if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) {
			/* Enough room after vram */
			rdev->mc.gtt_location = tmp;
		} else if (rdev->mc.vram_location >= rdev->mc.gtt_size) {
			/* Enough room before vram */
			rdev->mc.gtt_location = 0;
		} else {
			/* Not enough room after or before: shrink
			 * gart size
			 */
			if (rdev->mc.vram_location > (0xFFFFFFFFUL - tmp)) {
				rdev->mc.gtt_location = 0;
				rdev->mc.gtt_size = rdev->mc.vram_location;
			} else {
				rdev->mc.gtt_location = tmp;
				rdev->mc.gtt_size = 0xFFFFFFFFUL - tmp;
			}
		}
		rdev->mc.gtt_location = rdev->mc.mc_vram_size;
	}
	rdev->mc.vram_start = rdev->mc.vram_location;
	rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
	rdev->mc.gtt_start = rdev->mc.gtt_location;
	rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
	/* FIXME: we should enforce default clock in case GPU is not in
	 * default setup
	 */
	a.full = rfixed_const(100);
	rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
	rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
	return 0;
}

/* We don't check whether the GPU really needs a reset; we simply do the
 * reset. It's up to the caller to determine if the GPU needs one. We
 * might add a helper function to check that.
 */
int r600_gpu_soft_reset(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) |
				S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) |
				S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) |
				S_008010_SH_BUSY(1) | S_008010_SPI03_BUSY(1) |
				S_008010_SMX_BUSY(1) | S_008010_SC_BUSY(1) |
				S_008010_PA_BUSY(1) | S_008010_DB03_BUSY(1) |
				S_008010_CR_BUSY(1) | S_008010_CB03_BUSY(1) |
				S_008010_GUI_ACTIVE(1);
	u32 grbm2_busy_mask = S_008014_SPI0_BUSY(1) | S_008014_SPI1_BUSY(1) |
			S_008014_SPI2_BUSY(1) | S_008014_SPI3_BUSY(1) |
			S_008014_TA0_BUSY(1) | S_008014_TA1_BUSY(1) |
			S_008014_TA2_BUSY(1) | S_008014_TA3_BUSY(1) |
			S_008014_DB0_BUSY(1) | S_008014_DB1_BUSY(1) |
			S_008014_DB2_BUSY(1) | S_008014_DB3_BUSY(1) |
			S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) |
			S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
	u32 srbm_reset = 0;
	u32 tmp;

	dev_info(rdev->dev, "GPU soft reset\n");
	dev_info(rdev->dev, "  R_008010_GRBM_STATUS=0x%08X\n",
		RREG32(R_008010_GRBM_STATUS));
	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2=0x%08X\n",
		RREG32(R_008014_GRBM_STATUS2));
	dev_info(rdev->dev, "  R_000E50_SRBM_STATUS=0x%08X\n",
		RREG32(R_000E50_SRBM_STATUS));
	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Disable CP parsing/prefetching */
	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(0xff));
	/* Check if any of the rendering blocks is busy and reset them */
	if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
	    (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
		tmp = S_008020_SOFT_RESET_CR(1) |
			S_008020_SOFT_RESET_DB(1) |
			S_008020_SOFT_RESET_CB(1) |
			S_008020_SOFT_RESET_PA(1) |
			S_008020_SOFT_RESET_SC(1) |
			S_008020_SOFT_RESET_SMX(1) |
			S_008020_SOFT_RESET_SPI(1) |
			S_008020_SOFT_RESET_SX(1) |
			S_008020_SOFT_RESET_SH(1) |
			S_008020_SOFT_RESET_TC(1) |
			S_008020_SOFT_RESET_TA(1) |
			S_008020_SOFT_RESET_VC(1) |
			S_008020_SOFT_RESET_VGT(1);
		dev_info(rdev->dev, "  R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(R_008020_GRBM_SOFT_RESET, tmp);
		(void)RREG32(R_008020_GRBM_SOFT_RESET);
		udelay(50);
		WREG32(R_008020_GRBM_SOFT_RESET, 0);
		(void)RREG32(R_008020_GRBM_SOFT_RESET);
	}
	/* Reset CP (we always reset CP) */
	tmp = S_008020_SOFT_RESET_CP(1);
	dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
	WREG32(R_008020_GRBM_SOFT_RESET, tmp);
	(void)RREG32(R_008020_GRBM_SOFT_RESET);
	udelay(50);
	WREG32(R_008020_GRBM_SOFT_RESET, 0);
	(void)RREG32(R_008020_GRBM_SOFT_RESET);
	/* Reset other GPU blocks if necessary */
	if (G_000E50_RLC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_RLC(1);
	if (G_000E50_GRBM_RQ_PENDING(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_GRBM(1);
	if (G_000E50_HI_RQ_PENDING(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_IH(1);
	if (G_000E50_VMC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_VMC(1);
	if (G_000E50_MCB_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_MC(1);
	if (G_000E50_MCDZ_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_MC(1);
	if (G_000E50_MCDY_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_MC(1);
	if (G_000E50_MCDX_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_MC(1);
	if (G_000E50_MCDW_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_MC(1);
	if (G_000E50_RLC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_RLC(1);
	if (G_000E50_SEM_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_SEM(1);
	if (G_000E50_BIF_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_BIF(1);
	dev_info(rdev->dev, "  R_000E60_SRBM_SOFT_RESET=0x%08X\n", srbm_reset);
	WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset);
	(void)RREG32(R_000E60_SRBM_SOFT_RESET);
	udelay(50);
	WREG32(R_000E60_SRBM_SOFT_RESET, 0);
	(void)RREG32(R_000E60_SRBM_SOFT_RESET);
	WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset);
	(void)RREG32(R_000E60_SRBM_SOFT_RESET);
	udelay(50);
	WREG32(R_000E60_SRBM_SOFT_RESET, 0);
	(void)RREG32(R_000E60_SRBM_SOFT_RESET);
	/* Wait a little for things to settle down */
	udelay(50);
	dev_info(rdev->dev, "  R_008010_GRBM_STATUS=0x%08X\n",
		RREG32(R_008010_GRBM_STATUS));
	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2=0x%08X\n",
		RREG32(R_008014_GRBM_STATUS2));
	dev_info(rdev->dev, "  R_000E50_SRBM_STATUS=0x%08X\n",
		RREG32(R_000E50_SRBM_STATUS));
	/* After reset we need to reinit the asic as the GPU often ends up in
	 * an incoherent state.
	 */
	atom_asic_init(rdev->mode_info.atom_context);
	rv515_mc_resume(rdev, &save);
	return 0;
}

int r600_gpu_reset(struct radeon_device *rdev)
{
	return r600_gpu_soft_reset(rdev);
}

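/* Builds the pipe-to-backend map written into BACKEND_MAP: each tile pipe
 * gets a 2-bit backend index, stored at bits [2p+1:2p] for pipe p. With 4
 * pipes, 4 enabled backends and the identity swizzle this yields 0xE4
 * (binary 11 10 01 00: backend 3 for pipe 3 down to backend 0 for pipe 0).
 */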
static u32 r600_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
					     u32 num_backends,
					     u32 backend_disable_mask)
{
	u32 backend_map = 0;
	u32 enabled_backends_mask;
	u32 enabled_backends_count;
	u32 cur_pipe;
	u32 swizzle_pipe[R6XX_MAX_PIPES];
	u32 cur_backend;
	u32 i;

	if (num_tile_pipes > R6XX_MAX_PIPES)
		num_tile_pipes = R6XX_MAX_PIPES;
	if (num_tile_pipes < 1)
		num_tile_pipes = 1;
	if (num_backends > R6XX_MAX_BACKENDS)
		num_backends = R6XX_MAX_BACKENDS;
	if (num_backends < 1)
		num_backends = 1;

	enabled_backends_mask = 0;
	enabled_backends_count = 0;
	for (i = 0; i < R6XX_MAX_BACKENDS; ++i) {
		if (((backend_disable_mask >> i) & 1) == 0) {
			enabled_backends_mask |= (1 << i);
			++enabled_backends_count;
		}
		if (enabled_backends_count == num_backends)
			break;
	}

	if (enabled_backends_count == 0) {
		enabled_backends_mask = 1;
		enabled_backends_count = 1;
	}

	if (enabled_backends_count != num_backends)
		num_backends = enabled_backends_count;

	memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R6XX_MAX_PIPES);
	switch (num_tile_pipes) {
	case 1:
		swizzle_pipe[0] = 0;
		break;
	case 2:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		break;
	case 3:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		swizzle_pipe[2] = 2;
		break;
	case 4:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		swizzle_pipe[2] = 2;
		swizzle_pipe[3] = 3;
		break;
	case 5:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		swizzle_pipe[2] = 2;
		swizzle_pipe[3] = 3;
		swizzle_pipe[4] = 4;
		break;
	case 6:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 2;
		swizzle_pipe[2] = 4;
		swizzle_pipe[3] = 5;
		swizzle_pipe[4] = 1;
		swizzle_pipe[5] = 3;
		break;
	case 7:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 2;
		swizzle_pipe[2] = 4;
		swizzle_pipe[3] = 6;
		swizzle_pipe[4] = 1;
		swizzle_pipe[5] = 3;
		swizzle_pipe[6] = 5;
		break;
	case 8:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 2;
		swizzle_pipe[2] = 4;
		swizzle_pipe[3] = 6;
		swizzle_pipe[4] = 1;
		swizzle_pipe[5] = 3;
		swizzle_pipe[6] = 5;
		swizzle_pipe[7] = 7;
		break;
	}

	cur_backend = 0;
	for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
		while (((1 << cur_backend) & enabled_backends_mask) == 0)
			cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;

		backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2)));

		cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
	}

	return backend_map;
}

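/* Population count of a 32-bit mask; functionally the same as the kernel's
 * hweight32(), kept open-coded here.
 */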
int r600_count_pipe_bits(uint32_t val)
{
	int i, ret = 0;

	for (i = 0; i < 32; i++) {
		ret += val & 1;
		val >>= 1;
	}
	return ret;
}

void r600_gpu_init(struct radeon_device *rdev)
{
	u32 tiling_config;
	u32 ramcfg;
	u32 tmp;
	int i, j;
	u32 sq_config;
	u32 sq_gpr_resource_mgmt_1 = 0;
	u32 sq_gpr_resource_mgmt_2 = 0;
	u32 sq_thread_resource_mgmt = 0;
	u32 sq_stack_resource_mgmt_1 = 0;
	u32 sq_stack_resource_mgmt_2 = 0;

	/* FIXME: implement */
	switch (rdev->family) {
	case CHIP_R600:
		rdev->config.r600.max_pipes = 4;
		rdev->config.r600.max_tile_pipes = 8;
		rdev->config.r600.max_simds = 4;
		rdev->config.r600.max_backends = 4;
		rdev->config.r600.max_gprs = 256;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 256;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 16;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	case CHIP_RV630:
	case CHIP_RV635:
		rdev->config.r600.max_pipes = 2;
		rdev->config.r600.max_tile_pipes = 2;
		rdev->config.r600.max_simds = 3;
		rdev->config.r600.max_backends = 1;
		rdev->config.r600.max_gprs = 128;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 128;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 4;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		rdev->config.r600.max_pipes = 1;
		rdev->config.r600.max_tile_pipes = 1;
		rdev->config.r600.max_simds = 2;
		rdev->config.r600.max_backends = 1;
		rdev->config.r600.max_gprs = 128;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 128;
		rdev->config.r600.max_hw_contexts = 4;
		rdev->config.r600.max_gs_threads = 4;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 1;
		break;
	case CHIP_RV670:
		rdev->config.r600.max_pipes = 4;
		rdev->config.r600.max_tile_pipes = 4;
		rdev->config.r600.max_simds = 4;
		rdev->config.r600.max_backends = 4;
		rdev->config.r600.max_gprs = 192;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 256;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 16;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	default:
		break;
	}

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}

	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));

	/* Setup tiling */
	tiling_config = 0;
	ramcfg = RREG32(RAMCFG);
	switch (rdev->config.r600.max_tile_pipes) {
	case 1:
		tiling_config |= PIPE_TILING(0);
		break;
	case 2:
		tiling_config |= PIPE_TILING(1);
		break;
	case 4:
		tiling_config |= PIPE_TILING(2);
		break;
	case 8:
		tiling_config |= PIPE_TILING(3);
		break;
	default:
		break;
	}
	tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
	tiling_config |= GROUP_SIZE(0);
	tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
	if (tmp > 3) {
		tiling_config |= ROW_TILING(3);
		tiling_config |= SAMPLE_SPLIT(3);
	} else {
		tiling_config |= ROW_TILING(tmp);
		tiling_config |= SAMPLE_SPLIT(tmp);
	}
	tiling_config |= BANK_SWAPS(1);
	tmp = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes,
						rdev->config.r600.max_backends,
						(0xff << rdev->config.r600.max_backends) & 0xff);
	tiling_config |= BACKEND_MAP(tmp);
	WREG32(GB_TILING_CONFIG, tiling_config);
	WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
	WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);

	tmp = BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK);
	WREG32(CC_RB_BACKEND_DISABLE, tmp);

	/* Setup pipes */
	tmp = INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK);
	tmp |= INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK);
	WREG32(CC_GC_SHADER_PIPE_CONFIG, tmp);
	WREG32(GC_USER_SHADER_PIPE_CONFIG, tmp);

	tmp = R6XX_MAX_BACKENDS - r600_count_pipe_bits(tmp & INACTIVE_QD_PIPES_MASK);
	WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
	WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);

	/* Setup some CP states */
	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | ROQ_IB2_START(0x2b)));
	WREG32(CP_MEQ_THRESHOLDS, (MEQ_END(0x40) | ROQ_END(0x40)));

	WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | SYNC_GRADIENT |
			     SYNC_WALKER | SYNC_ALIGNER));
	/* Setup various GPU states */
	if (rdev->family == CHIP_RV670)
		WREG32(ARB_GDEC_RD_CNTL, 0x00000021);

	tmp = RREG32(SX_DEBUG_1);
	tmp |= SMX_EVENT_RELEASE;
	if ((rdev->family > CHIP_R600))
		tmp |= ENABLE_NEW_SMX_ADDRESS;
	WREG32(SX_DEBUG_1, tmp);

	if (((rdev->family) == CHIP_R600) ||
	    ((rdev->family) == CHIP_RV630) ||
	    ((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780) ||
	    ((rdev->family) == CHIP_RS880)) {
		WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE);
	} else {
		WREG32(DB_DEBUG, 0);
	}
	WREG32(DB_WATERMARKS, (DEPTH_FREE(4) | DEPTH_CACHELINE_FREE(16) |
			       DEPTH_FLUSH(16) | DEPTH_PENDING_FREE(4)));

	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
	WREG32(VGT_NUM_INSTANCES, 0);

	WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(0));

	tmp = RREG32(SQ_MS_FIFO_SIZES);
	if (((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780) ||
	    ((rdev->family) == CHIP_RS880)) {
		tmp = (CACHE_FIFO_SIZE(0xa) |
		       FETCH_FIFO_HIWATER(0xa) |
		       DONE_FIFO_HIWATER(0xe0) |
		       ALU_UPDATE_FIFO_HIWATER(0x8));
	} else if (((rdev->family) == CHIP_R600) ||
		   ((rdev->family) == CHIP_RV630)) {
		tmp &= ~DONE_FIFO_HIWATER(0xff);
		tmp |= DONE_FIFO_HIWATER(0x4);
	}
	WREG32(SQ_MS_FIFO_SIZES, tmp);

	/* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
	 * should be adjusted as needed by the 2D/3D drivers.  This just sets default values
	 */
	sq_config = RREG32(SQ_CONFIG);
	sq_config &= ~(PS_PRIO(3) |
		       VS_PRIO(3) |
		       GS_PRIO(3) |
		       ES_PRIO(3));
	sq_config |= (DX9_CONSTS |
		      VC_ENABLE |
		      PS_PRIO(0) |
		      VS_PRIO(1) |
		      GS_PRIO(2) |
		      ES_PRIO(3));

	if ((rdev->family) == CHIP_R600) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(124) |
					  NUM_VS_GPRS(124) |
					  NUM_CLAUSE_TEMP_GPRS(4));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(0) |
					  NUM_ES_GPRS(0));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(136) |
					   NUM_VS_THREADS(48) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(4));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(128) |
					    NUM_VS_STACK_ENTRIES(128));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(0) |
					    NUM_ES_STACK_ENTRIES(0));
	} else if (((rdev->family) == CHIP_RV610) ||
		   ((rdev->family) == CHIP_RV620) ||
		   ((rdev->family) == CHIP_RS780) ||
		   ((rdev->family) == CHIP_RS880)) {
		/* no vertex cache */
		sq_config &= ~VC_ENABLE;

		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
					  NUM_ES_GPRS(17));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
					    NUM_VS_STACK_ENTRIES(40));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
					    NUM_ES_STACK_ENTRIES(16));
	} else if (((rdev->family) == CHIP_RV630) ||
		   ((rdev->family) == CHIP_RV635)) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(18) |
					  NUM_ES_GPRS(18));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
					    NUM_VS_STACK_ENTRIES(40));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
					    NUM_ES_STACK_ENTRIES(16));
	} else if ((rdev->family) == CHIP_RV670) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
					  NUM_ES_GPRS(17));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(64) |
					    NUM_VS_STACK_ENTRIES(64));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(64) |
					    NUM_ES_STACK_ENTRIES(64));
	}

	WREG32(SQ_CONFIG, sq_config);
	WREG32(SQ_GPR_RESOURCE_MGMT_1,  sq_gpr_resource_mgmt_1);
	WREG32(SQ_GPR_RESOURCE_MGMT_2,  sq_gpr_resource_mgmt_2);
	WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
	WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
	WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);

	if (((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780) ||
	    ((rdev->family) == CHIP_RS880)) {
		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY));
	} else {
		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC));
	}

	/* More default values. 2D/3D driver should adjust as needed */
	WREG32(PA_SC_AA_SAMPLE_LOCS_2S, (S0_X(0xc) | S0_Y(0x4) |
					 S1_X(0x4) | S1_Y(0xc)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_4S, (S0_X(0xe) | S0_Y(0xe) |
					 S1_X(0x2) | S1_Y(0x2) |
					 S2_X(0xa) | S2_Y(0x6) |
					 S3_X(0x6) | S3_Y(0xa)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD0, (S0_X(0xe) | S0_Y(0xb) |
					     S1_X(0x4) | S1_Y(0xc) |
					     S2_X(0x1) | S2_Y(0x6) |
					     S3_X(0xa) | S3_Y(0xe)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD1, (S4_X(0x6) | S4_Y(0x1) |
					     S5_X(0x0) | S5_Y(0x0) |
					     S6_X(0xb) | S6_Y(0x4) |
					     S7_X(0x7) | S7_Y(0x8)));

	WREG32(VGT_STRMOUT_EN, 0);
	tmp = rdev->config.r600.max_pipes * 16;
	switch (rdev->family) {
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		tmp += 32;
		break;
	case CHIP_RV670:
		tmp += 128;
		break;
	default:
		break;
	}
	if (tmp > 256) {
		tmp = 256;
	}
	WREG32(VGT_ES_PER_GS, 128);
	WREG32(VGT_GS_PER_ES, tmp);
	WREG32(VGT_GS_PER_VS, 2);
	WREG32(VGT_GS_VERTEX_REUSE, 16);

	/* more default values. 2D/3D driver should adjust as needed */
	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
	WREG32(VGT_STRMOUT_EN, 0);
	WREG32(SX_MISC, 0);
	WREG32(PA_SC_MODE_CNTL, 0);
	WREG32(PA_SC_AA_CONFIG, 0);
	WREG32(PA_SC_LINE_STIPPLE, 0);
	WREG32(SPI_INPUT_Z, 0);
	WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
	WREG32(CB_COLOR7_FRAG, 0);

	/* Clear render buffer base addresses */
	WREG32(CB_COLOR0_BASE, 0);
	WREG32(CB_COLOR1_BASE, 0);
	WREG32(CB_COLOR2_BASE, 0);
	WREG32(CB_COLOR3_BASE, 0);
	WREG32(CB_COLOR4_BASE, 0);
	WREG32(CB_COLOR5_BASE, 0);
	WREG32(CB_COLOR6_BASE, 0);
	WREG32(CB_COLOR7_BASE, 0);
	WREG32(CB_COLOR7_FRAG, 0);

	switch (rdev->family) {
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		tmp = TC_L2_SIZE(8);
		break;
	case CHIP_RV630:
	case CHIP_RV635:
		tmp = TC_L2_SIZE(4);
		break;
	case CHIP_R600:
		tmp = TC_L2_SIZE(0) | L2_DISABLE_LATE_HIT;
		break;
	default:
		tmp = TC_L2_SIZE(0);
		break;
	}
	WREG32(TC_CNTL, tmp);

	tmp = RREG32(HDP_HOST_PATH_CNTL);
	WREG32(HDP_HOST_PATH_CNTL, tmp);

	tmp = RREG32(ARB_POP);
	tmp |= ENABLE_TC128;
	WREG32(ARB_POP, tmp);

	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
	WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
			       NUM_CLIP_SEQ(3)));
	WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095));
}

/*
 * Indirect register accessors
 */
u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
{
	u32 r;

	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
	(void)RREG32(PCIE_PORT_INDEX);
	r = RREG32(PCIE_PORT_DATA);
	return r;
}

void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
	(void)RREG32(PCIE_PORT_INDEX);
	WREG32(PCIE_PORT_DATA, (v));
	(void)RREG32(PCIE_PORT_DATA);
}

/*
 * CP & Ring
 */
void r600_cp_stop(struct radeon_device *rdev)
{
	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
}

int r600_cp_start(struct radeon_device *rdev)
{
	int r;
	uint32_t cp_me;

	r = radeon_ring_lock(rdev, 7);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}
	radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
	radeon_ring_write(rdev, 0x1);
	if (rdev->family < CHIP_RV770) {
		radeon_ring_write(rdev, 0x3);
		radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
	} else {
		radeon_ring_write(rdev, 0x0);
		radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
	}
	radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, 0);
	radeon_ring_unlock_commit(rdev);

	cp_me = 0xff;
	WREG32(R_0086D8_CP_ME_CNTL, cp_me);
	return 0;
}

void r600_cp_commit(struct radeon_device *rdev)
{
	WREG32(CP_RB_WPTR, rdev->cp.wptr);
	(void)RREG32(CP_RB_WPTR);
}

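/* The CP ring size is rounded to a power of two. For the default 1MB
 * request: drm_order(1MB / 8) = 17 and (1 << (17 + 1)) * 4 = 1MB again,
 * so a power-of-two input survives the round trip unchanged.
 */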
void r600_ring_init(struct radeon_device *rdev, unsigned ring_size)
{
	u32 rb_bufsz;

	/* Align ring size */
	rb_bufsz = drm_order(ring_size / 8);
	ring_size = (1 << (rb_bufsz + 1)) * 4;
	rdev->cp.ring_size = ring_size;
	rdev->cp.align_mask = 16 - 1;
}

/*
 * GPU scratch register helper functions.
 */
void r600_scratch_init(struct radeon_device *rdev)
{
	int i;

	rdev->scratch.num_reg = 7;
	for (i = 0; i < rdev->scratch.num_reg; i++) {
		rdev->scratch.free[i] = true;
		rdev->scratch.reg[i] = SCRATCH_REG0 + (i * 4);
	}
}

int r600_set_surface_reg(struct radeon_device *rdev, int reg,
			 uint32_t tiling_flags, uint32_t pitch,
			 uint32_t offset, uint32_t obj_size)
{
	/* FIXME: implement */
	return 0;
}

void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
{
	/* FIXME: implement */
}

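/* Heuristic POST detection: if any CRTC is enabled, or the memory
 * controller already reports a non-zero memory size, the VBIOS has run.
 */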
bool r600_card_posted(struct radeon_device *rdev)
{
	uint32_t reg;

	/* first check CRTCs */
	reg = RREG32(D1CRTC_CONTROL) |
		RREG32(D2CRTC_CONTROL);
	if (reg & CRTC_EN)
		return true;

	/* then check MEM_SIZE, in case the crtcs are off */
	if (RREG32(CONFIG_MEMSIZE))
		return true;

	return false;
}

int r600_startup(struct radeon_device *rdev)
{
	int r;

	r600_mc_program(rdev);
	if (rdev->flags & RADEON_IS_AGP) {
		r600_agp_enable(rdev);
	} else {
		r = r600_pcie_gart_enable(rdev);
		if (r)
			return r;
	}
	r600_gpu_init(rdev);

//	r = radeon_object_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
//			      &rdev->r600_blit.shader_gpu_addr);
//	if (r) {
//		DRM_ERROR("failed to pin blit object %d\n", r);
//		return r;
//	}

//	r = radeon_ring_init(rdev, rdev->cp.ring_size);
//	if (r)
//		return r;
//	r = r600_cp_load_microcode(rdev);
//	if (r)
//		return r;
//	r = r600_cp_resume(rdev);
//	if (r)
//		return r;
	/* the write-back buffer is not vital, so don't worry about failure */
//	r600_wb_enable(rdev);
	return 0;
}

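/* A hedged reading of the CONFIG_CNTL bit twiddling below: bit 1 appears
 * to disable VGA decode (it is set when state is false, cleared when
 * re-enabling), with bit 0 cleared alongside it on disable.
 */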
void r600_vga_set_state(struct radeon_device *rdev, bool state)
{
	uint32_t temp;

	temp = RREG32(CONFIG_CNTL);
	if (state == false) {
		temp &= ~(1<<0);
		temp |= (1<<1);
	} else {
		temp &= ~(1<<1);
	}
	WREG32(CONFIG_CNTL, temp);
}

/* The plan is to move initialization into this function and use helper
 * functions so that radeon_device_init does pretty much nothing more than
 * calling asic-specific functions. This should also allow us to remove a
 * bunch of callbacks like vram_info.
 */
int r600_init(struct radeon_device *rdev)
{
	int r;

	r = radeon_dummy_page_init(rdev);
	if (r)
		return r;
	if (r600_debugfs_mc_info_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for mc!\n");
	}
	/* This doesn't do much */
	r = radeon_gem_init(rdev);
	if (r)
		return r;
	/* Read BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	/* Must be an ATOMBIOS */
	if (!rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
		return -EINVAL;
	}
	r = radeon_atombios_init(rdev);
	if (r)
		return r;
	/* Post card if necessary */
	if (!r600_card_posted(rdev) && rdev->bios) {
		DRM_INFO("GPU not posted. posting now...\n");
		atom_asic_init(rdev->mode_info.atom_context);
	}
	/* Initialize scratch registers */
	r600_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	r = radeon_clocks_init(rdev);
	if (r)
		return r;
	/* Initialize power management */
	radeon_pm_init(rdev);
	/* Fence driver */
//	r = radeon_fence_driver_init(rdev);
//	if (r)
//		return r;
	r = r600_mc_init(rdev);
	dbgprintf("mc vram location %x\n", rdev->mc.vram_location);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_object_init(rdev);
	if (r)
		return r;
//	rdev->cp.ring_obj = NULL;
//	r600_ring_init(rdev, 1024 * 1024);

//	if (!rdev->me_fw || !rdev->pfp_fw) {
//		r = r600_cp_init_microcode(rdev);
//		if (r) {
//			DRM_ERROR("Failed to load firmware!\n");
//			return r;
//		}
//	}

	r = r600_pcie_gart_init(rdev);
	if (r)
		return r;

	rdev->accel_working = true;
//	r = r600_blit_init(rdev);
//	if (r) {
//		DRM_ERROR("radeon: failed blitter (%d).\n", r);
//		return r;
//	}

	r = r600_startup(rdev);
	if (r) {
//		r600_suspend(rdev);
//		r600_wb_fini(rdev);
//		radeon_ring_fini(rdev);
		r600_pcie_gart_fini(rdev);
		rdev->accel_working = false;
	}
	if (rdev->accel_working) {
//		r = radeon_ib_pool_init(rdev);
//		if (r) {
//			DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
//			rdev->accel_working = false;
//		}
//		r = r600_ib_test(rdev);
//		if (r) {
//			DRM_ERROR("radeon: failed testing IB (%d).\n", r);
//			rdev->accel_working = false;
//		}
	}
	return 0;
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

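/* Ring dump helper: pointer arithmetic is done modulo the ring size via
 * ptr_mask, and the dump walks entries starting at the CP read pointer.
 */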
static int r600_debugfs_cp_ring_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	uint32_t rdp, wdp;
	unsigned count, i, j;

	radeon_ring_free_size(rdev);
	rdp = RREG32(CP_RB_RPTR);
	wdp = RREG32(CP_RB_WPTR);
	count = (rdp + rdev->cp.ring_size - wdp) & rdev->cp.ptr_mask;
	seq_printf(m, "CP_STAT 0x%08x\n", RREG32(CP_STAT));
	seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp);
	seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp);
	seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
	seq_printf(m, "%u dwords in ring\n", count);
	for (j = 0; j <= count; j++) {
		i = (rdp + j) & rdev->cp.ptr_mask;
		seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
	}
	return 0;
}

static int r600_debugfs_mc_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	DREG32_SYS(m, rdev, R_000E50_SRBM_STATUS);
	DREG32_SYS(m, rdev, VM_L2_STATUS);
	return 0;
}

static struct drm_info_list r600_mc_info_list[] = {
	{"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
	{"r600_ring_info", r600_debugfs_cp_ring_info, 0, NULL},
};
#endif

int r600_debugfs_mc_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, r600_mc_info_list, ARRAY_SIZE(r600_mc_info_list));
#else
	return 0;
#endif
}