Subversion Repositories Kolibri OS


Rev 1414 → Rev 1430
Line 115... Line 115...
 		return -EINVAL;
 	}
 	r = radeon_gart_table_vram_pin(rdev);
 	if (r)
 		return r;
+	radeon_gart_restore(rdev);
 	/* discard memory request outside of configured range */
 	tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
 	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
-	WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_location);
-	tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - RADEON_GPU_PAGE_SIZE;
+	WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_start);
+	tmp = rdev->mc.gtt_end & ~RADEON_GPU_PAGE_MASK;
 	WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, tmp);
 	WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
 	WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
 	table_addr = rdev->gart.table_addr;
 	WREG32_PCIE(RADEON_PCIE_TX_GART_BASE, table_addr);
 	/* FIXME: setup default page */
-	WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_location);
+	WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_start);
 	WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_HI, 0);
 	/* Clear error */
 	WREG32_PCIE(0x18, 0);
 	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
 	tmp |= RADEON_PCIE_TX_GART_EN;
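
The GART hunk tracks two changes in rev 1430: the page-table entries are re-uploaded with radeon_gart_restore() before the aperture is enabled, and the mc.gtt_location/gtt_size pair gives way to explicit mc.gtt_start/mc.gtt_end bounds. A minimal standalone sketch of why the two END_LO computations agree, assuming gtt_end is the inclusive last byte of the aperture (gtt_start + gtt_size - 1) and a 4 KiB GPU page; the define names are local stand-ins:

#include <assert.h>
#include <stdint.h>

#define GPU_PAGE_SIZE 4096u                 /* stand-in for RADEON_GPU_PAGE_SIZE */
#define GPU_PAGE_MASK (GPU_PAGE_SIZE - 1)   /* stand-in for RADEON_GPU_PAGE_MASK */

int main(void)
{
	/* example aperture: 32 MiB of GTT at 512 MiB */
	uint32_t gtt_start = 0x20000000u;
	uint32_t gtt_size  = 0x02000000u;
	uint32_t gtt_end   = gtt_start + gtt_size - 1; /* inclusive end */

	/* old rev: base of the last GPU page from start + size */
	uint32_t old_val = gtt_start + gtt_size - GPU_PAGE_SIZE;
	/* new rev: same base by masking the page offset off gtt_end */
	uint32_t new_val = gtt_end & ~GPU_PAGE_MASK;

	assert(old_val == new_val);
	return 0;
}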
Line 172... Line 173...
 			  struct radeon_fence *fence)
 {
 	/* Whoever calls radeon_fence_emit should call ring_lock and ask
 	 * for enough space (today callers are ib schedule and buffer move) */
 	/* Write SC register so SC & US assert idle */
-	radeon_ring_write(rdev, PACKET0(0x43E0, 0));
+	radeon_ring_write(rdev, PACKET0(R300_RE_SCISSORS_TL, 0));
 	radeon_ring_write(rdev, 0);
-	radeon_ring_write(rdev, PACKET0(0x43E4, 0));
+	radeon_ring_write(rdev, PACKET0(R300_RE_SCISSORS_BR, 0));
 	radeon_ring_write(rdev, 0);
 	/* Flush 3D cache */
-	radeon_ring_write(rdev, PACKET0(0x4E4C, 0));
-	radeon_ring_write(rdev, (2 << 0));
-	radeon_ring_write(rdev, PACKET0(0x4F18, 0));
-	radeon_ring_write(rdev, (1 << 0));
+	radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+	radeon_ring_write(rdev, R300_RB3D_DC_FLUSH);
+	radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
+	radeon_ring_write(rdev, R300_ZC_FLUSH);
 	/* Wait until IDLE & CLEAN */
-	radeon_ring_write(rdev, PACKET0(0x1720, 0));
-	radeon_ring_write(rdev, (1 << 17) | (1 << 16)  | (1 << 9));
+	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
+	radeon_ring_write(rdev, (RADEON_WAIT_3D_IDLECLEAN |
+				 RADEON_WAIT_2D_IDLECLEAN |
+				 RADEON_WAIT_DMA_GUI_IDLE));
 	radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
 	radeon_ring_write(rdev, rdev->config.r300.hdp_cntl |
 				RADEON_HDP_READ_BUFFER_INVALIDATE);
 	radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
 	radeon_ring_write(rdev, rdev->config.r300.hdp_cntl);
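
This hunk replaces raw register offsets and bit literals (0x43E0, 0x4E4C, (1 << 17) | (1 << 16) | (1 << 9), ...) with the named defines R300_RE_SCISSORS_TL, R300_RB3D_DSTCACHE_CTLSTAT, RADEON_WAIT_UNTIL and friends; the change is cosmetic, the emitted packets should be identical. For orientation, a sketch of the CP type-0 header that a PACKET0()-style macro builds, under the usual Radeon CP encoding (assumed here, not taken from this file): bits 31:30 carry the packet type (0), bits 29:16 the count of following dwords minus one, and bits 15:0 the register byte offset divided by four.

#include <stdint.h>
#include <stdio.h>

/* sketch of a type-0 CP packet header; count = extra dwords beyond one */
static uint32_t packet0(uint32_t reg, uint32_t count)
{
	return ((count & 0x3fffu) << 16) | ((reg >> 2) & 0xffffu);
}

int main(void)
{
	/* e.g. a single write to register 0x43E0 (R300_RE_SCISSORS_TL) */
	printf("0x%08x\n", packet0(0x43E0, 0));
	return 0;
}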
Line 196... Line 199...
 	radeon_ring_write(rdev, fence->seq);
 	radeon_ring_write(rdev, PACKET0(RADEON_GEN_INT_STATUS, 0));
 	radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
 }
-
-
-#if 0
-
-
-int r300_copy_dma(struct radeon_device *rdev,
-		  uint64_t src_offset,
-		  uint64_t dst_offset,
-		  unsigned num_pages,
-		  struct radeon_fence *fence)
-{
-	uint32_t size;
-	uint32_t cur_size;
-	int i, num_loops;
-	int r = 0;
-
-	/* radeon pitch is /64 */
-	size = num_pages << PAGE_SHIFT;
-	num_loops = DIV_ROUND_UP(size, 0x1FFFFF);
-	r = radeon_ring_lock(rdev, num_loops * 4 + 64);
-	if (r) {
-		DRM_ERROR("radeon: moving bo (%d).\n", r);
-		return r;
-	}
-	/* Must wait for 2D idle & clean before DMA or hangs might happen */
-	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0 ));
-	radeon_ring_write(rdev, (1 << 16));
-	for (i = 0; i < num_loops; i++) {
-		cur_size = size;
-		if (cur_size > 0x1FFFFF) {
-			cur_size = 0x1FFFFF;
-		}
-		size -= cur_size;
-		radeon_ring_write(rdev, PACKET0(0x720, 2));
-		radeon_ring_write(rdev, src_offset);
-		radeon_ring_write(rdev, dst_offset);
-		radeon_ring_write(rdev, cur_size | (1 << 31) | (1 << 30));
-		src_offset += cur_size;
-		dst_offset += cur_size;
-	}
-	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
-	radeon_ring_write(rdev, RADEON_WAIT_DMA_GUI_IDLE);
-	if (fence) {
-		r = radeon_fence_emit(rdev, fence);
-	}
-	radeon_ring_unlock_commit(rdev);
-	return r;
-}
-
-#endif
-
 
 void r300_ring_start(struct radeon_device *rdev)
 {
 	unsigned gb_tile_config;
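
Rev 1430 also drops the entire #if 0'd r300_copy_dma() block, which was dead code. Its chunking logic is still a useful reference: the DMA packet carries the byte count in a size-limited field, so a copy is split into pieces of at most 0x1FFFFF bytes. A standalone sketch of that arithmetic (the 0x1FFFFF limit is taken from the deleted code):

#include <stdio.h>

#define DMA_MAX_CHUNK 0x1FFFFFu /* per-packet limit from the deleted code */

int main(void)
{
	unsigned size = 10u << 20; /* example: a 10 MiB copy */
	/* same result as the kernel's DIV_ROUND_UP(size, DMA_MAX_CHUNK) */
	unsigned num_loops = (size + DMA_MAX_CHUNK - 1) / DMA_MAX_CHUNK;

	printf("%u bytes -> %u DMA packets\n", size, num_loops);
	return 0;
}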
Line 285... Line 238...
 	radeon_ring_write(rdev, gb_tile_config);
 	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
 	radeon_ring_write(rdev,
 			  RADEON_WAIT_2D_IDLECLEAN |
 			  RADEON_WAIT_3D_IDLECLEAN);
-	radeon_ring_write(rdev, PACKET0(0x170C, 0));
-	radeon_ring_write(rdev, 1 << 31);
+	radeon_ring_write(rdev, PACKET0(R300_DST_PIPE_CONFIG, 0));
+	radeon_ring_write(rdev, R300_PIPE_AUTO_CONFIG);
 	radeon_ring_write(rdev, PACKET0(R300_GB_SELECT, 0));
 	radeon_ring_write(rdev, 0);
 	radeon_ring_write(rdev, PACKET0(R300_GB_ENABLE, 0));
 	radeon_ring_write(rdev, 0);
 	radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
Line 353... Line 306...
 	unsigned i;
 	uint32_t tmp;
 
 	for (i = 0; i < rdev->usec_timeout; i++) {
 		/* read MC_STATUS */
-		tmp = RREG32(0x0150);
-		if (tmp & (1 << 4)) {
+		tmp = RREG32(RADEON_MC_STATUS);
+		if (tmp & R300_MC_IDLE) {
 			return 0;
 		}
 		DRM_UDELAY(1);
 	}
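
Here the magic pair RREG32(0x0150) / (1 << 4) becomes RADEON_MC_STATUS / R300_MC_IDLE. The surrounding loop is the driver's standard poll-until-idle idiom; a generic standalone sketch of it, with read_reg() and udelay() as hypothetical stand-ins for RREG32() and DRM_UDELAY():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* simulate a status register that eventually reports idle */
static uint32_t fake_status;
static uint32_t read_reg(uint32_t reg) { (void)reg; return fake_status++; }
static void udelay(unsigned us) { (void)us; }

/* hypothetical helper: poll a status register until a mask is set */
static bool poll_until_set(uint32_t reg, uint32_t mask, unsigned usec_timeout)
{
	for (unsigned i = 0; i < usec_timeout; i++) {
		if (read_reg(reg) & mask)
			return true;   /* idle */
		udelay(1);
	}
	return false;                  /* timed out; caller decides what next */
}

int main(void)
{
	/* stand-ins for RADEON_MC_STATUS (0x0150) and R300_MC_IDLE (1 << 4) */
	printf("%s\n", poll_until_set(0x0150, 1u << 4, 64) ? "idle" : "busy");
	return 0;
}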
Line 399... Line 352...
 	if (r100_gui_wait_for_idle(rdev)) {
 		printk(KERN_WARNING "Failed to wait GUI idle while "
 		       "programming pipes. Bad things might happen.\n");
 	}
 
-	tmp = RREG32(0x170C);
-	WREG32(0x170C, tmp | (1 << 31));
+	tmp = RREG32(R300_DST_PIPE_CONFIG);
+	WREG32(R300_DST_PIPE_CONFIG, tmp | R300_PIPE_AUTO_CONFIG);
 
 	WREG32(R300_RB2D_DSTCACHE_MODE,
Line 441... Line 394...
 		if (tmp & ((1 << 20) | (1 << 26))) {
 			DRM_ERROR("VAP & CP still busy (RBBM_STATUS=0x%08X)", tmp);
 			/* GA still busy, soft reset it */
 			WREG32(0x429C, 0x200);
 			WREG32(R300_VAP_PVS_STATE_FLUSH_REG, 0);
-			WREG32(0x43E0, 0);
-			WREG32(0x43E4, 0);
+			WREG32(R300_RE_SCISSORS_TL, 0);
+			WREG32(R300_RE_SCISSORS_BR, 0);
 			WREG32(0x24AC, 0);
 		}
 		/* Wait to prevent race in RBBM_STATUS */
 		mdelay(1);
 		tmp = RREG32(RADEON_RBBM_STATUS);
Line 492... Line 445...
 	if (status & (1 << 16)) {
 		r100_cp_reset(rdev);
 	}
 	/* Check if GPU is idle */
 	status = RREG32(RADEON_RBBM_STATUS);
-	if (status & (1 << 31)) {
+	if (status & RADEON_RBBM_ACTIVE) {
 		DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
 		return -1;
 	}
 	DRM_INFO("GPU reset succeeded (RBBM_STATUS=0x%08X)\n", status);
 	return 0;
Line 504... Line 457...
 
 
 /*
  * r300,r350,rv350,rv380 VRAM info
  */
-void r300_vram_info(struct radeon_device *rdev)
+void r300_mc_init(struct radeon_device *rdev)
 {
-	uint32_t tmp;
+	u64 base;
+	u32 tmp;
 
 	/* DDR for all cards after R300 & IGP */
 	rdev->mc.vram_is_ddr = true;
-
 	tmp = RREG32(RADEON_MEM_CNTL);
 	tmp &= R300_MEM_NUM_CHANNELS_MASK;
 	switch (tmp) {
 	case 0: rdev->mc.vram_width = 64; break;
 	case 1: rdev->mc.vram_width = 128; break;
 	case 2: rdev->mc.vram_width = 256; break;
 	default:  rdev->mc.vram_width = 128; break;
 	}
-
 	r100_vram_init_sizes(rdev);
+	base = rdev->mc.aper_base;
+	if (rdev->flags & RADEON_IS_IGP)
+		base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
+	radeon_vram_location(rdev, &rdev->mc, base);
+	if (!(rdev->flags & RADEON_IS_AGP))
+		radeon_gtt_location(rdev, &rdev->mc);
 }
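
The rewritten function no longer just sizes VRAM: renamed from r300_vram_info() to r300_mc_init(), it also places the VRAM and GTT apertures via radeon_vram_location() and radeon_gtt_location(). On IGPs the framebuffer is stolen system RAM, so the base comes from the northbridge NB_TOM register rather than the PCI aperture. A sketch of that decode, assuming NB_TOM[15:0] holds the framebuffer start in 64 KiB units (which is what the << 16 in the hunk implies):

#include <stdint.h>
#include <stdio.h>

/* assumed layout: low 16 bits of NB_TOM = FB start in 64 KiB units */
static uint64_t igp_vram_base(uint32_t nb_tom)
{
	return (uint64_t)(nb_tom & 0xffffu) << 16;
}

int main(void)
{
	/* example readback: start field 0x0800 -> base 0x08000000 (128 MiB) */
	printf("0x%llx\n", (unsigned long long)igp_vram_base(0x7fff0800u));
	return 0;
}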
Line 582... Line 540...
 	while (link_width_cntl == 0xffffffff)
 		link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
 
+}
+
+int rv370_get_pcie_lanes(struct radeon_device *rdev)
+{
+	u32 link_width_cntl;
+
+	if (rdev->flags & RADEON_IS_IGP)
+		return 0;
+
+	if (!(rdev->flags & RADEON_IS_PCIE))
+		return 0;
+
+	/* FIXME wait for idle */
+
+	if (rdev->family < CHIP_R600)
+		link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
+	else
+		link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
+
+	switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
+	case RADEON_PCIE_LC_LINK_WIDTH_X0:
+		return 0;
+	case RADEON_PCIE_LC_LINK_WIDTH_X1:
+		return 1;
+	case RADEON_PCIE_LC_LINK_WIDTH_X2:
+		return 2;
+	case RADEON_PCIE_LC_LINK_WIDTH_X4:
+		return 4;
+	case RADEON_PCIE_LC_LINK_WIDTH_X8:
+		return 8;
+	case RADEON_PCIE_LC_LINK_WIDTH_X16:
+	default:
+		return 16;
+	}
 }
 
 #if defined(CONFIG_DEBUG_FS)
 static int rv370_debugfs_pcie_gart_info(struct seq_file *m, void *data)
 {
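
The new rv370_get_pcie_lanes() is the read-back counterpart of rv370_set_pcie_lanes(): it extracts the RD field from LC_LINK_WIDTH_CNTL and maps the hardware encoding to a lane count. The extraction is the usual mask-and-shift pattern; a standalone sketch with example values (the driver's real mask and shift defines are not assumed here):

#include <stdint.h>
#include <stdio.h>

/* generic field extraction, mirroring
 * (link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK)
 *			>> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT */
static uint32_t get_field(uint32_t reg, uint32_t mask, unsigned shift)
{
	return (reg & mask) >> shift;
}

int main(void)
{
	/* example: a 3-bit field at bits 6:4 of a readback value */
	printf("%u\n", get_field(0x00000150u, 0x70u, 4)); /* prints 5 */
	return 0;
}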
Line 714... Line 706...
 
 		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
 			tile_flags |= R300_TXO_MACRO_TILE;
 		if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
 			tile_flags |= R300_TXO_MICRO_TILE;
+		else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
+			tile_flags |= R300_TXO_MICRO_TILE_SQUARE;
 
 		tmp = idx_value + ((u32)reloc->lobj.gpu_offset);
 		tmp |= tile_flags;
Line 764... Line 758...
 
 		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
 			tile_flags |= R300_COLOR_TILE_ENABLE;
 		if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
 			tile_flags |= R300_COLOR_MICROTILE_ENABLE;
+		else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
+			tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE;
 
 		tmp = idx_value & ~(0x7 << 16);
Line 835... Line 831...
 		}
 
 		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
 			tile_flags |= R300_DEPTHMACROTILE_ENABLE;
 		if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
-			tile_flags |= R300_DEPTHMICROTILE_TILED;;
+			tile_flags |= R300_DEPTHMICROTILE_TILED;
+		else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
+			tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE;
 
 		tmp = idx_value & ~(0x7 << 16);
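
All three CS-checker hunks above (texture, color buffer, depth buffer) grow the same branch: RADEON_TILING_MICRO keeps selecting the existing micro-tile flag, and the new RADEON_TILING_MICRO_SQUARE selects the square variant; the depth hunk also drops a stray double semicolon. A standalone sketch of the shared pattern, with stand-in flag values rather than the driver's RADEON_TILING_* defines:

#include <stdint.h>
#include <stdio.h>

/* stand-in flag bits; the driver's actual values are not assumed */
#define TILING_MICRO        (1u << 0)
#define TILING_MICRO_SQUARE (1u << 1)

static uint32_t micro_tile_bits(uint32_t tiling_flags,
				uint32_t tiled, uint32_t tiled_square)
{
	if (tiling_flags & TILING_MICRO)
		return tiled;          /* e.g. R300_TXO_MICRO_TILE */
	else if (tiling_flags & TILING_MICRO_SQUARE)
		return tiled_square;   /* e.g. R300_TXO_MICRO_TILE_SQUARE */
	return 0;
}

int main(void)
{
	printf("0x%x\n", micro_tile_bits(TILING_MICRO_SQUARE, 0x10, 0x20));
	return 0;
}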
Line 1345... Line 1343...
 	r300_errata(rdev);
 	/* Initialize clocks */
 	radeon_get_clock_info(rdev->ddev);
 	/* Initialize power management */
 	radeon_pm_init(rdev);
-	/* Get vram informations */
-	r300_vram_info(rdev);
-	/* Initialize memory controller (also test AGP) */
-	r = r420_mc_init(rdev);
-    dbgprintf("mc vram location %x\n", rdev->mc.vram_location);
-	if (r)
-		return r;
+	/* initialize AGP */
+	if (rdev->flags & RADEON_IS_AGP) {
+		r = radeon_agp_init(rdev);
+		if (r) {
+			radeon_agp_disable(rdev);
+		}
+	}
+	/* initialize memory controller */
+	r300_mc_init(rdev);
 	/* Fence driver */
 //	r = radeon_fence_driver_init(rdev);
 //	if (r)
 //		return r;
 //	r = radeon_irq_kms_init(rdev);
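
The last hunk reorders init: AGP is brought up before the memory controller so that r300_mc_init() can see whether AGP is usable, and a failed radeon_agp_init() now demotes the card with radeon_agp_disable() instead of aborting the whole setup. A standalone sketch of that fallback idiom (the struct and stub functions are illustrative, not driver API):

#include <stdbool.h>
#include <stdio.h>

struct dev { bool is_agp; };

static int agp_init(struct dev *d) { (void)d; return -1; /* pretend probe failed */ }
static void agp_disable(struct dev *d) { d->is_agp = false; /* fall back to PCI GART */ }

int main(void)
{
	struct dev d = { .is_agp = true };

	if (d.is_agp) {
		if (agp_init(&d))
			agp_disable(&d);   /* degrade, keep initializing */
	}
	printf("agp %s\n", d.is_agp ? "enabled" : "disabled");
	return 0;
}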