Subversion Repositories Kolibri OS

Rev 1430 → Rev 1963 (lines prefixed with "-" were removed in Rev 1963, lines prefixed with "+" were added)
Line 25... Line 25...
  *          Alex Deucher
  *          Jerome Glisse
  */
 #include 
 //#include 
+#include 
 #include "drmP.h"
 #include "radeon.h"
+#include "radeon_asic.h"
 #include "radeon_drm.h"
 #include "rv770d.h"
 #include "atom.h"
 #include "avivod.h"
 
 #define R700_PFP_UCODE_SIZE 848
 #define R700_PM4_UCODE_SIZE 1360
 
 static void rv770_gpu_init(struct radeon_device *rdev);
 void rv770_fini(struct radeon_device *rdev);
+static void rv770_pcie_gen2_enable(struct radeon_device *rdev);
 
Line 123... Line 126...
 	}
 }
 
 void rv770_pcie_gart_fini(struct radeon_device *rdev)
 {
+	radeon_gart_fini(rdev);
 	rv770_pcie_gart_disable(rdev);
 	radeon_gart_table_vram_free(rdev);
-    radeon_gart_fini(rdev);
 }
 
Line 170... Line 173...
 		WREG32((0x2c18 + j), 0x00000000);
 		WREG32((0x2c1c + j), 0x00000000);
 		WREG32((0x2c20 + j), 0x00000000);
 		WREG32((0x2c24 + j), 0x00000000);
 	}
-	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
+	/* r7xx hw bug.  Read from HDP_DEBUG1 rather
+	 * than writing to HDP_REG_COHERENCY_FLUSH_CNTL
+	 */
+	tmp = RREG32(HDP_DEBUG1);
 
 	rv515_mc_stop(rdev, &save);
 	if (r600_mc_wait_for_idle(rdev)) {
 		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
Line 205... Line 211...
 	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
 	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
 	WREG32(MC_VM_FB_LOCATION, tmp);
 	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
 	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
-	WREG32(HDP_NONSURFACE_SIZE, (rdev->mc.mc_vram_size - 1) | 0x3FF);
+	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
 	if (rdev->flags & RADEON_IS_AGP) {
 		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16);
 		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);
 		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
 	} else {
Line 230... Line 236...
 /*
  * CP.
  */
 void r700_cp_stop(struct radeon_device *rdev)
 {
+//   radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
 	WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
+	WREG32(SCRATCH_UMSK, 0);
 }
-
 
 static int rv770_cp_load_microcode(struct radeon_device *rdev)
 {
 	const __be32 *fw_data;
Line 242... Line 249...
 	int i;
 
 	if (!rdev->me_fw || !rdev->pfp_fw)
 		return -EINVAL;
 
 	r700_cp_stop(rdev);
-	WREG32(CP_RB_CNTL, RB_NO_UPDATE | (15 << 8) | (3 << 0));
+	WREG32(CP_RB_CNTL,
+#ifdef __BIG_ENDIAN
+	       BUF_SWAP_32BIT |
+#endif
+	       RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
 
Line 446... Line 457...
 	}
 
 	return backend_map;
+}
+
+static void rv770_program_channel_remap(struct radeon_device *rdev)
+{
+	u32 tcp_chan_steer, mc_shared_chremap, tmp;
+	bool force_no_swizzle;
+
+	switch (rdev->family) {
+	case CHIP_RV770:
+	case CHIP_RV730:
+		force_no_swizzle = false;
+		break;
+	case CHIP_RV710:
+	case CHIP_RV740:
+	default:
+		force_no_swizzle = true;
+		break;
+	}
+
+	tmp = RREG32(MC_SHARED_CHMAP);
+	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
+	case 0:
+	case 1:
+	default:
+		/* default mapping */
+		mc_shared_chremap = 0x00fac688;
+		break;
+	case 2:
+	case 3:
+		if (force_no_swizzle)
+			mc_shared_chremap = 0x00fac688;
+		else
+			mc_shared_chremap = 0x00bbc298;
+		break;
+	}
+
+	if (rdev->family == CHIP_RV740)
+		tcp_chan_steer = 0x00ef2a60;
+	else
+		tcp_chan_steer = 0x00fac688;
+
+	WREG32(TCP_CHAN_STEER, tcp_chan_steer);
+	WREG32(MC_SHARED_CHREMAP, mc_shared_chremap);
 }
 
 static void rv770_gpu_init(struct radeon_device *rdev)
 {
 	int i, j, num_qd_pipes;
Line 601... Line 655...
 	if (rdev->family == CHIP_RV770)
 		gb_tiling_config |= BANK_TILING(1);
 	else
 		gb_tiling_config |= BANK_TILING((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
 	rdev->config.rv770.tiling_nbanks = 4 << ((gb_tiling_config >> 4) & 0x3);
-
-	gb_tiling_config |= GROUP_SIZE(0);
+	gb_tiling_config |= GROUP_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
+	if ((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT)
+		rdev->config.rv770.tiling_group_size = 512;
+	else
 	rdev->config.rv770.tiling_group_size = 256;
-
 	if (((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT) > 3) {
 		gb_tiling_config |= ROW_TILING(3);
 		gb_tiling_config |= SAMPLE_SPLIT(3);
 	} else {
 		gb_tiling_config |=
Line 636... Line 691...
 								rdev->config.rv770.max_tile_pipes,
 								(R7XX_MAX_BACKENDS -
 								 r600_count_pipe_bits((cc_rb_backend_disable &
 										       R7XX_MAX_BACKENDS_MASK) >> 16)),
 								(cc_rb_backend_disable >> 16));
-	gb_tiling_config |= BACKEND_MAP(backend_map);
 
+	rdev->config.rv770.tile_config = gb_tiling_config;
+	gb_tiling_config |= BACKEND_MAP(backend_map);
 
 	WREG32(GB_TILING_CONFIG, gb_tiling_config);
 	WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
 	WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
 
+	rv770_program_channel_remap(rdev);
+
 	WREG32(CC_RB_BACKEND_DISABLE,      cc_rb_backend_disable);
 	WREG32(CC_GC_SHADER_PIPE_CONFIG,   cc_gc_shader_pipe_config);
+	WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
 	WREG32(CC_SYS_RB_BACKEND_DISABLE,  cc_rb_backend_disable);
 
 	WREG32(CGTS_SYS_TCC_DISABLE, 0);
 	WREG32(CGTS_TCC_DISABLE, 0);
+	WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
+	WREG32(CGTS_USER_TCC_DISABLE, 0);
Line 860... Line 921...
 	WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
 					  NUM_CLIP_SEQ(3)));
 
+}
+
+static int rv770_vram_scratch_init(struct radeon_device *rdev)
+{
+	int r;
+	u64 gpu_addr;
+
+	if (rdev->vram_scratch.robj == NULL) {
+		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
+				     PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
+				     &rdev->vram_scratch.robj);
+		if (r) {
+			return r;
+		}
+	}
+
+	r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
+	if (unlikely(r != 0))
+		return r;
+	r = radeon_bo_pin(rdev->vram_scratch.robj,
+			  RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
+	if (r) {
+		radeon_bo_unreserve(rdev->vram_scratch.robj);
+		return r;
+	}
+	r = radeon_bo_kmap(rdev->vram_scratch.robj,
+				(void **)&rdev->vram_scratch.ptr);
+	if (r)
+		radeon_bo_unpin(rdev->vram_scratch.robj);
+	radeon_bo_unreserve(rdev->vram_scratch.robj);
+
+	return r;
+}
+
+static void rv770_vram_scratch_fini(struct radeon_device *rdev)
+{
+	int r;
+
+	if (rdev->vram_scratch.robj == NULL) {
+		return;
+	}
+	r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
+	if (likely(r == 0)) {
+		radeon_bo_kunmap(rdev->vram_scratch.robj);
+		radeon_bo_unpin(rdev->vram_scratch.robj);
+		radeon_bo_unreserve(rdev->vram_scratch.robj);
+	}
+	radeon_bo_unref(&rdev->vram_scratch.robj);
+}
+
+void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
+{
+	u64 size_bf, size_af;
+
+	if (mc->mc_vram_size > 0xE0000000) {
+		/* leave room for at least 512M GTT */
+		dev_warn(rdev->dev, "limiting VRAM\n");
+		mc->real_vram_size = 0xE0000000;
+		mc->mc_vram_size = 0xE0000000;
+	}
+	if (rdev->flags & RADEON_IS_AGP) {
+		size_bf = mc->gtt_start;
+		size_af = 0xFFFFFFFF - mc->gtt_end + 1;
+		if (size_bf > size_af) {
+			if (mc->mc_vram_size > size_bf) {
+				dev_warn(rdev->dev, "limiting VRAM\n");
+				mc->real_vram_size = size_bf;
+				mc->mc_vram_size = size_bf;
+			}
+			mc->vram_start = mc->gtt_start - mc->mc_vram_size;
+		} else {
+			if (mc->mc_vram_size > size_af) {
+				dev_warn(rdev->dev, "limiting VRAM\n");
+				mc->real_vram_size = size_af;
+				mc->mc_vram_size = size_af;
+			}
+			mc->vram_start = mc->gtt_end;
+		}
+		mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
+		dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
+				mc->mc_vram_size >> 20, mc->vram_start,
+				mc->vram_end, mc->real_vram_size >> 20);
+	} else {
+		radeon_vram_location(rdev, &rdev->mc, 0);
+		rdev->mc.gtt_base_align = 0;
+		radeon_gtt_location(rdev, mc);
+	}
 }
 
 int rv770_mc_init(struct radeon_device *rdev)
 {
-	fixed20_12 a;
 	u32 tmp;
 	int chansize, numchan;
Line 894... Line 1041...
 		numchan = 8;
 		break;
 	}
 	rdev->mc.vram_width = numchan * chansize;
 	/* Could aper size report 0 ? */
-	rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
-	rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
+	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
+	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
 	/* Setup GPU memory space */
 	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
 	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
 	rdev->mc.visible_vram_size = rdev->mc.aper_size;
-	/* FIXME remove this once we support unmappable VRAM */
-	if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
-		rdev->mc.mc_vram_size = rdev->mc.aper_size;
-		rdev->mc.real_vram_size = rdev->mc.aper_size;
-		}
-	r600_vram_gtt_location(rdev, &rdev->mc);
-	/* FIXME: we should enforce default clock in case GPU is not in
-	 * default setup
-	 */
-	a.full = rfixed_const(100);
-	rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
-	rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
+	r700_vram_gtt_location(rdev, &rdev->mc);
+	radeon_update_bandwidth_info(rdev);
+
 	return 0;
 }
 
-int rv770_gpu_reset(struct radeon_device *rdev)
-{
-	/* FIXME: implement any rv770 specific bits */
-	return r600_gpu_reset(rdev);
-}
-
 static int rv770_startup(struct radeon_device *rdev)
 {
 	int r;
 
+	/* enable pcie gen2 link */
+	rv770_pcie_gen2_enable(rdev);
+
 	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
 		r = r600_init_microcode(rdev);
Line 941... Line 1076...
 	} else {
 		r = rv770_pcie_gart_enable(rdev);
 		if (r)
 			return r;
 	}
+	r = rv770_vram_scratch_init(rdev);
+	if (r)
+		return r;
 	rv770_gpu_init(rdev);
 	r = radeon_ring_init(rdev, rdev->cp.ring_size);
 	if (r)
 		return r;
 	r = rv770_cp_load_microcode(rdev);
 	if (r)
 		return r;
 	r = r600_cp_resume(rdev);
 	if (r)
 		return r;
-	/* write back buffer are not vital so don't worry about failure */
-//	r600_wb_enable(rdev);
+
 	return 0;
 }
Line 972... Line 1109...
  */
 int rv770_init(struct radeon_device *rdev)
 {
 	int r;
 
-	r = radeon_dummy_page_init(rdev);
-	if (r)
-		return r;
-	/* This don't do much */
-	r = radeon_gem_init(rdev);
-	if (r)
-		return r;
 	/* Read BIOS */
 	if (!radeon_get_bios(rdev)) {
 		if (ASIC_IS_AVIVO(rdev))
 			return -EINVAL;
Line 993... Line 1123...
 	}
 	r = radeon_atombios_init(rdev);
 	if (r)
 		return r;
 	/* Post card if necessary */
-	if (!r600_card_posted(rdev)) {
+	if (!radeon_card_posted(rdev)) {
 		if (!rdev->bios) {
 			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
 			return -EINVAL;
 		}
 		DRM_INFO("GPU not posted. posting now...\n");
Line 1007... Line 1137...
 	r600_scratch_init(rdev);
 	/* Initialize surface registers */
 	radeon_surface_init(rdev);
 	/* Initialize clocks */
 	radeon_get_clock_info(rdev->ddev);
-	r = radeon_clocks_init(rdev);
-	if (r)
-		return r;
-	/* Initialize power management */
-	radeon_pm_init(rdev);
 	/* Fence driver */
-//   r = radeon_fence_driver_init(rdev);
-//   if (r)
-//       return r;
+//	r = radeon_fence_driver_init(rdev);
+//	if (r)
+//		return r;
 	/* initialize AGP */
Line 1060... Line 1185...
 //				dev_err(rdev->dev, "IB test failed (%d).\n", r);
 //				rdev->accel_working = false;
 //			}
 //		}
 	}
+
 	return 0;
 }
+
+static void rv770_pcie_gen2_enable(struct radeon_device *rdev)
+{
+	u32 link_width_cntl, lanes, speed_cntl, tmp;
+	u16 link_cntl2;
+
+	if (radeon_pcie_gen2 == 0)
+		return;
+
+	if (rdev->flags & RADEON_IS_IGP)
+		return;
+
+	if (!(rdev->flags & RADEON_IS_PCIE))
+		return;
+
+	/* x2 cards have a special sequence */
+	if (ASIC_IS_X2(rdev))
+		return;
+
+	/* advertise upconfig capability */
+	link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+	link_width_cntl &= ~LC_UPCONFIGURE_DIS;
+	WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+	link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+	if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) {
+		lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
+		link_width_cntl &= ~(LC_LINK_WIDTH_MASK |
+				     LC_RECONFIG_ARC_MISSING_ESCAPE);
+		link_width_cntl |= lanes | LC_RECONFIG_NOW |
+			LC_RENEGOTIATE_EN | LC_UPCONFIGURE_SUPPORT;
+		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+	} else {
+		link_width_cntl |= LC_UPCONFIGURE_DIS;
+		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+	}
+
+	speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+	if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
+	    (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
+
+		tmp = RREG32(0x541c);
+		WREG32(0x541c, tmp | 0x8);
+		WREG32(MM_CFGREGS_CNTL, MM_WR_TO_CFG_EN);
+		link_cntl2 = RREG16(0x4088);
+		link_cntl2 &= ~TARGET_LINK_SPEED_MASK;
+		link_cntl2 |= 0x2;
+		WREG16(0x4088, link_cntl2);
+		WREG32(MM_CFGREGS_CNTL, 0);
+
+		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+		speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
+		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+		speed_cntl |= LC_CLR_FAILED_SPD_CHANGE_CNT;
+		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+		speed_cntl &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
+		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+		speed_cntl |= LC_GEN2_EN_STRAP;
+		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+	} else {
+		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+		/* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
+		if (1)
+			link_width_cntl |= LC_UPCONFIGURE_DIS;
+		else
+			link_width_cntl &= ~LC_UPCONFIGURE_DIS;
+		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+	}