Subversion Repositories Kolibri OS

Rev 1428 → Rev 1430. The changes are shown below as a unified diff: lines prefixed with '-' appear only in Rev 1428, lines prefixed with '+' only in Rev 1430, and unprefixed lines are shared context. Each hunk header gives the first listed line number in each revision.
Line 350 (Rev 1428) ... Line 350 (Rev 1430)
  }

  /*
   * R600 PCIE GART
   */
- int r600_gart_clear_page(struct radeon_device *rdev, int i)
- {
- 	void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
- 	u64 pte;
-
- 	if (i < 0 || i > rdev->gart.num_gpu_pages)
- 		return -EINVAL;
- 	pte = 0;
- 	writeq(pte, ((void __iomem *)ptr) + (i * 8));
- 	return 0;
- }
-
  void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
  {
  	unsigned i;
  	u32 tmp;
+
+ 	/* flush hdp cache so updates hit vram */
+ 	WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

  	WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
  	WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
  	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
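
This hunk removes the R600-private r600_gart_clear_page() (the zero-PTE write now goes through the common radeon GART code; see the radeon_gart_restore() call added in the next hunk) and adds an HDP cache flush at the top of r600_pcie_gart_tlb_flush(), so CPU writes to the VRAM-resident page table reach memory before the TLB invalidate. The removed helper only wrote a zero 64-bit entry at slot i; a minimal user-space sketch of that addressing, with a plain array standing in for the __iomem mapping (names here are illustrative, not driver API):

    #include <errno.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Model of an R600-style GART table: one 64-bit entry per GPU page. */
    static int gart_clear_page(uint64_t *table, size_t num_gpu_pages, size_t i)
    {
        if (i >= num_gpu_pages)
            return -EINVAL;
        table[i] = 0;   /* the driver did writeq(0, ptr + i * 8) into VRAM */
        return 0;
    }

    int main(void)
    {
        uint64_t table[16] = { 0 };
        return gart_clear_page(table, 16, 3) ? 1 : 0;
    }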
Line 413 (Rev 1428) ... Line 404 (Rev 1430)
  		return -EINVAL;
  	}
  	r = radeon_gart_table_vram_pin(rdev);
  	if (r)
  		return r;
+ 	radeon_gart_restore(rdev);

  	/* Setup L2 cache */
  	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
  				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
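
Rev 1430 calls radeon_gart_restore() right after the table is pinned; the common GART helper re-writes every page-table entry from the addresses the driver has recorded for bound pages, so existing mappings end up in the freshly pinned table. A rough user-space model of the idea, assuming 64-bit entries and a saved-address array (illustrative only):

    #include <stdint.h>

    #define NUM_PAGES 8

    int main(void)
    {
        uint64_t table[NUM_PAGES] = { 0 };
        uint64_t saved[NUM_PAGES] = { 0x100000, 0x200000, 0 };  /* 0 == unmapped */

        for (int i = 0; i < NUM_PAGES; i++)     /* rewrite every entry */
            table[i] = saved[i];
        /* the driver would then flush the GART TLB (r600_pcie_gart_tlb_flush) */
        return 0;
    }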
Line 616 (Rev 1428) ... Line 608 (Rev 1430)
  	/* we need to own VRAM, so turn off the VGA renderer here
  	 * to stop it overwriting our objects */
  	rv515_vga_render_disable(rdev);
  }
+
+ /**
+  * r600_vram_gtt_location - try to find VRAM & GTT location
+  * @rdev: radeon device structure holding all necessary informations
+  * @mc: memory controller structure holding memory informations
+  *
+  * Function will place try to place VRAM at same place as in CPU (PCI)
+  * address space as some GPU seems to have issue when we reprogram at
+  * different address space.
+  *
+  * If there is not enough space to fit the unvisible VRAM after the
+  * aperture then we limit the VRAM size to the aperture.
+  *
+  * If we are using AGP then place VRAM adjacent to AGP aperture are we need
+  * them to be in one from GPU point of view so that we can program GPU to
+  * catch access outside them (weird GPU policy see ??).
+  *
+  * This function will never fails, worst case are limiting VRAM or GTT.
+  *
+  * Note: GTT start, end, size should be initialized before calling this
+  * function on AGP platform.
+  */
+ void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
+ {
+ 	u64 size_bf, size_af;
+
+ 	if (mc->mc_vram_size > 0xE0000000) {
+ 		/* leave room for at least 512M GTT */
+ 		dev_warn(rdev->dev, "limiting VRAM\n");
+ 		mc->real_vram_size = 0xE0000000;
+ 		mc->mc_vram_size = 0xE0000000;
+ 	}
+ 	if (rdev->flags & RADEON_IS_AGP) {
+ 		size_bf = mc->gtt_start;
+ 		size_af = 0xFFFFFFFF - mc->gtt_end + 1;
+ 		if (size_bf > size_af) {
+ 			if (mc->mc_vram_size > size_bf) {
+ 				dev_warn(rdev->dev, "limiting VRAM\n");
+ 				mc->real_vram_size = size_bf;
+ 				mc->mc_vram_size = size_bf;
+ 			}
+ 			mc->vram_start = mc->gtt_start - mc->mc_vram_size;
+ 		} else {
+ 			if (mc->mc_vram_size > size_af) {
+ 				dev_warn(rdev->dev, "limiting VRAM\n");
+ 				mc->real_vram_size = size_af;
+ 				mc->mc_vram_size = size_af;
+ 			}
+ 			mc->vram_start = mc->gtt_end;
+ 		}
+ 		mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
+ 		dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
+ 				mc->mc_vram_size >> 20, mc->vram_start,
+ 				mc->vram_end, mc->real_vram_size >> 20);
+ 	} else {
+ 		u64 base = 0;
+ 		if (rdev->flags & RADEON_IS_IGP)
+ 			base = (RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24;
+ 		radeon_vram_location(rdev, &rdev->mc, base);
+ 		radeon_gtt_location(rdev, mc);
+ 	}
+ }

  int r600_mc_init(struct radeon_device *rdev)
  {
  	fixed20_12 a;
  	u32 tmp;
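
The new r600_vram_gtt_location() centralizes the placement policy that r600_mc_init() used to open-code (next hunk). On AGP boards it measures the room below the GTT aperture (size_bf) and above it (size_af), places VRAM in the larger gap, and clamps the VRAM size to that gap when it does not fit; on everything else it defers to the generic radeon_vram_location()/radeon_gtt_location() helpers, seeding the base from MC_VM_FB_LOCATION on IGPs. A standalone sketch of the AGP branch's decision, in plain C with no driver types (the end-of-aperture arithmetic is simplified here):

    #include <stdint.h>
    #include <stdio.h>

    struct mc_model {
        uint64_t gtt_start, gtt_end;    /* GTT aperture, gtt_end inclusive */
        uint64_t mc_vram_size;          /* requested VRAM size */
        uint64_t vram_start, vram_end;  /* result */
    };

    /* Put VRAM in the larger gap around the GTT aperture, clamping if needed. */
    static void place_vram_agp(struct mc_model *mc)
    {
        uint64_t size_bf = mc->gtt_start;                       /* room below the aperture */
        uint64_t size_af = 0x100000000ull - (mc->gtt_end + 1);  /* room above, up to 4 GB  */

        if (size_bf > size_af) {
            if (mc->mc_vram_size > size_bf)
                mc->mc_vram_size = size_bf;
            mc->vram_start = mc->gtt_start - mc->mc_vram_size;
        } else {
            if (mc->mc_vram_size > size_af)
                mc->mc_vram_size = size_af;
            mc->vram_start = mc->gtt_end + 1;
        }
        mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
    }

    int main(void)
    {
        struct mc_model mc = {
            .gtt_start = 0xC0000000, .gtt_end = 0xCFFFFFFF,
            .mc_vram_size = 512ull << 20,
        };
        place_vram_agp(&mc);
        printf("VRAM 0x%llX - 0x%llX\n",
               (unsigned long long)mc.vram_start,
               (unsigned long long)mc.vram_end);
        return 0;
    }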
Line 655 (Rev 1428) ... Line 709 (Rev 1430)
  	rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
  	rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
  	/* Setup GPU memory space */
  	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
  	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
-
- 	if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
+ 	rdev->mc.visible_vram_size = rdev->mc.aper_size;
+ 	/* FIXME remove this once we support unmappable VRAM */
+ 	if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
  		rdev->mc.mc_vram_size = rdev->mc.aper_size;
-
- 	if (rdev->mc.real_vram_size > rdev->mc.aper_size)
  		rdev->mc.real_vram_size = rdev->mc.aper_size;
-
- 	if (rdev->flags & RADEON_IS_AGP) {
- 		/* gtt_size is setup by radeon_agp_init */
- 		rdev->mc.gtt_location = rdev->mc.agp_base;
- 		tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size;
- 		/* Try to put vram before or after AGP because we
- 		 * we want SYSTEM_APERTURE to cover both VRAM and
- 		 * AGP so that GPU can catch out of VRAM/AGP access
- 		 */
- 		if (rdev->mc.gtt_location > rdev->mc.mc_vram_size) {
- 			/* Enough place before */
- 			rdev->mc.vram_location = rdev->mc.gtt_location -
- 							rdev->mc.mc_vram_size;
- 		} else if (tmp > rdev->mc.mc_vram_size) {
- 			/* Enough place after */
- 			rdev->mc.vram_location = rdev->mc.gtt_location +
- 							rdev->mc.gtt_size;
- 		} else {
- 			/* Try to setup VRAM then AGP might not
- 			 * not work on some card
- 			 */
- 			rdev->mc.vram_location = 0x00000000UL;
- 			rdev->mc.gtt_location = rdev->mc.mc_vram_size;
- 		}
- 	} else {
- 		rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
- 			rdev->mc.vram_location = (RREG32(MC_VM_FB_LOCATION) &
- 								0xFFFF) << 24;
- 			tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size;
- 			if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) {
- 				/* Enough place after vram */
- 				rdev->mc.gtt_location = tmp;
- 			} else if (rdev->mc.vram_location >= rdev->mc.gtt_size) {
- 				/* Enough place before vram */
- 				rdev->mc.gtt_location = 0;
- 			} else {
- 				/* Not enough place after or before shrink
- 				 * gart size
- 				 */
- 				if (rdev->mc.vram_location > (0xFFFFFFFFUL - tmp)) {
- 					rdev->mc.gtt_location = 0;
- 					rdev->mc.gtt_size = rdev->mc.vram_location;
- 				} else {
- 					rdev->mc.gtt_location = tmp;
- 					rdev->mc.gtt_size = 0xFFFFFFFFUL - tmp;
- 				}
- 			}
- 			rdev->mc.gtt_location = rdev->mc.mc_vram_size;
- 	}
- 	rdev->mc.vram_start = rdev->mc.vram_location;
- 	rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
- 	rdev->mc.gtt_start = rdev->mc.gtt_location;
- 	rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
+ 	}
+ 	r600_vram_gtt_location(rdev, &rdev->mc);
  	/* FIXME: we should enforce default clock in case GPU is not in
  	 * default setup
  	 */
  	a.full = rfixed_const(100);
  	rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
  	rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
-
  	if (rdev->flags & RADEON_IS_IGP)
  		rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
-
  	return 0;
  }
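
In r600_mc_init() the hand-rolled AGP/non-AGP placement and the vram_location/gtt_location bookkeeping collapse into a single call to r600_vram_gtt_location(). The hunk also records the CPU-visible VRAM size from the PCI aperture and, per the FIXME, clamps both reported VRAM sizes to that aperture until unmappable VRAM is supported. A tiny model of that clamp, with plain integers standing in for the mc fields (illustrative names):

    #include <stdint.h>

    struct vram_model {
        uint64_t aper_size;          /* CPU-visible PCI aperture (BAR) size */
        uint64_t mc_vram_size;       /* VRAM size reported by CONFIG_MEMSIZE */
        uint64_t real_vram_size;
        uint64_t visible_vram_size;
    };

    static void clamp_to_aperture(struct vram_model *mc)
    {
        mc->visible_vram_size = mc->aper_size;
        /* FIXME in the driver: drop once unmappable VRAM is supported */
        if (mc->mc_vram_size > mc->aper_size) {
            mc->mc_vram_size = mc->aper_size;
            mc->real_vram_size = mc->aper_size;
        }
    }

    int main(void)
    {
        struct vram_model mc = { .aper_size = 256ull << 20,
                                 .mc_vram_size = 512ull << 20,
                                 .real_vram_size = 512ull << 20 };
        clamp_to_aperture(&mc);
        return mc.mc_vram_size == mc.aper_size ? 0 : 1;
    }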
Line 731 (Rev 1428) ... Line 731 (Rev 1430)

  /* We doesn't check that the GPU really needs a reset we simply do the
Line 978 (Rev 1428) ... Line 978 (Rev 1430)

  void r600_gpu_init(struct radeon_device *rdev)
  {
  	u32 tiling_config;
  	u32 ramcfg;
+ 	u32 backend_map;
+ 	u32 cc_rb_backend_disable;
+ 	u32 cc_gc_shader_pipe_config;
  	u32 tmp;
  	int i, j;
  	u32 sq_config;
  	u32 sq_gpr_resource_mgmt_1 = 0;
Line 1087 (Rev 1428) ... Line 1090 (Rev 1430)
  		tiling_config |= PIPE_TILING(3);
  		break;
  	default:
  		break;
  	}
+ 	rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes;
+ 	rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
  	tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
  	tiling_config |= GROUP_SIZE(0);
+ 	rdev->config.r600.tiling_group_size = 256;
  	tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
  	if (tmp > 3) {
  		tiling_config |= ROW_TILING(3);
  		tiling_config |= SAMPLE_SPLIT(3);
  	} else {
  		tiling_config |= ROW_TILING(tmp);
  		tiling_config |= SAMPLE_SPLIT(tmp);
  	}
  	tiling_config |= BANK_SWAPS(1);
+
+ 	cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
+ 	cc_rb_backend_disable |=
+ 		BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK);
+
+ 	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
+ 	cc_gc_shader_pipe_config |=
+ 		INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK);
+ 	cc_gc_shader_pipe_config |=
+ 		INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK);
+
- 	tmp = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes,
- 						rdev->config.r600.max_backends,
- 						(0xff << rdev->config.r600.max_backends) & 0xff);
+ 	backend_map = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes,
+ 							(R6XX_MAX_BACKENDS -
+ 							 r600_count_pipe_bits((cc_rb_backend_disable &
+ 									       R6XX_MAX_BACKENDS_MASK) >> 16)),
+ 							(cc_rb_backend_disable >> 16));
+
- 	tiling_config |= BACKEND_MAP(tmp);
+ 	tiling_config |= BACKEND_MAP(backend_map);
  	WREG32(GB_TILING_CONFIG, tiling_config);
  	WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
  	WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);
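
The rewritten backend setup no longer builds the disable mask from max_backends alone: it reads back the backends already disabled in CC_RB_BACKEND_DISABLE (bits 16-23), ORs in the software disables, and derives the enabled-backend count with r600_count_pipe_bits(), a plain population count, before asking r600_get_tile_pipe_to_backend_map() for the map. The two bit manipulations can be checked in isolation; the sketch below assumes an 8-bit backend mask, matching the 0xff mask used by the old code:

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_BACKENDS        8       /* assumed, matching the 8-bit mask below */
    #define MAX_BACKENDS_MASK   0xffu

    /* Population count, as r600_count_pipe_bits() does for these masks. */
    static unsigned count_bits(uint32_t val)
    {
        unsigned ret = 0;
        for (; val; val >>= 1)
            ret += val & 1;
        return ret;
    }

    int main(void)
    {
        unsigned max_backends = 4;      /* backends this SKU is allowed to use    */
        uint32_t hw_disable   = 0x08;   /* backend 3 already disabled in hardware */

        /* disable everything above max_backends, then OR in the hardware bits */
        uint32_t sw_disable = (MAX_BACKENDS_MASK << max_backends) & MAX_BACKENDS_MASK;
        uint32_t disable    = sw_disable | hw_disable;

        unsigned enabled = MAX_BACKENDS - count_bits(disable & MAX_BACKENDS_MASK);
        printf("disable mask 0x%02X -> %u backends enabled\n", disable, enabled);
        return 0;
    }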
Line 1110 (Rev 1428) ... Line 1130 (Rev 1430)
-
- 	tmp = BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK);
- 	WREG32(CC_RB_BACKEND_DISABLE, tmp);

- 	/* Setup pipes */
- 	tmp = INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK);
- 	tmp |= INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK);
- 	WREG32(CC_GC_SHADER_PIPE_CONFIG, tmp);
- 	WREG32(GC_USER_SHADER_PIPE_CONFIG, tmp);
+ 	/* Setup pipes */
+ 	WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
+ 	WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);

- 	tmp = R6XX_MAX_BACKENDS - r600_count_pipe_bits(tmp & INACTIVE_QD_PIPES_MASK);
+ 	tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
  	WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
  	WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);
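
With the raw values kept in cc_gc_shader_pipe_config, the active quad-pipe count is now R6XX_MAX_PIPES minus the population count of the INACTIVE_QD_PIPES field (bits 8 and up), and that count sizes the VGT deallocation distance (tmp * 4) and vertex-reuse depth (tmp * 4 - 2). For example, with two of eight quad-pipes inactive the driver programs a dealloc distance of 24 and a reuse depth of 22. A standalone check of that arithmetic (the 8-pipe maximum and the bit-8 field position are assumptions taken from the masks used above):

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_PIPES                8      /* assumed value of R6XX_MAX_PIPES */
    #define INACTIVE_QD_PIPES_SHIFT  8
    #define INACTIVE_QD_PIPES_MASK   (0xffu << INACTIVE_QD_PIPES_SHIFT)

    static unsigned count_bits(uint32_t v)
    {
        unsigned n = 0;
        for (; v; v >>= 1)
            n += v & 1;
        return n;
    }

    int main(void)
    {
        /* pretend two quad-pipes are marked inactive in the config register */
        uint32_t cc_gc_shader_pipe_config = 0x3u << INACTIVE_QD_PIPES_SHIFT;

        unsigned active = MAX_PIPES -
            count_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);

        printf("active pipes %u, dealloc dist %u, vtx reuse depth %u\n",
               active, active * 4, active * 4 - 2);
        return 0;
    }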
Line 1709 (Rev 1428) ... Line 1724 (Rev 1430)
  }
  void r600_fence_ring_emit(struct radeon_device *rdev,
  			  struct radeon_fence *fence)
  {
  	/* Also consider EVENT_WRITE_EOP.  it handles the interrupts + timestamps + events */
+
+ 	radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
+ 	radeon_ring_write(rdev, CACHE_FLUSH_AND_INV_EVENT);
+ 	/* wait for 3D idle clean */
+ 	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ 	radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
+ 	radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
  	/* Emit fence sequence & fire IRQ */
  	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
  	radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
  	radeon_ring_write(rdev, fence->seq);
- 	radeon_ring_write(rdev, PACKET0(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0));
- 	radeon_ring_write(rdev, 1);
  	/* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
  	radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
  	radeon_ring_write(rdev, RB_INT_STAT);
  }
  int r600_set_surface_reg(struct radeon_device *rdev, int reg,
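
The fence path now flushes and invalidates the GPU caches (CACHE_FLUSH_AND_INV_EVENT) and waits for the 3D engine to report idle-clean before the fence sequence number is written to the scratch register, while the HDP-flush packet that used to follow the sequence write is dropped. The consumer side stays conceptually the same: a fence counts as signalled once the value read back from the scratch register has caught up with the fence's sequence. A tiny model of that check with a wrap-safe comparison (illustrative, not the driver's radeon_fence code):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* A fence is treated as signalled once the value the GPU wrote to the
     * scratch register has reached the fence's sequence number (wrap-safe). */
    static bool fence_signaled(uint32_t scratch_value, uint32_t fence_seq)
    {
        return (int32_t)(scratch_value - fence_seq) >= 0;
    }

    int main(void)
    {
        printf("%d %d\n", fence_signaled(42, 40), fence_signaled(39, 40));
        return 0;
    }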
Line 1858 (Rev 1428) ... Line 1878 (Rev 1430)
  	if (r)
  		return r;
  	/* Initialize power management */
  	radeon_pm_init(rdev);
  	/* Fence driver */
- //	r = radeon_fence_driver_init(rdev);
- //	if (r)
- //		return r;
  	if (rdev->flags & RADEON_IS_AGP) {
  		r = radeon_agp_init(rdev);
  		if (r)
  			radeon_agp_disable(rdev);
  	}
  	r = r600_mc_init(rdev);
- 	dbgprintf("mc vram location %x\n", rdev->mc.vram_location);
  	if (r)
  		return r;
  	/* Memory manager */
  	r = radeon_bo_init(rdev);
  	if (r)