Subversion Repositories Kolibri OS

Rev 1430 → Rev 1963
@@ -23 +23 @@
  *
  * Authors: Dave Airlie
  *          Alex Deucher
  *          Jerome Glisse
  */
+#include 
 #include 
 #include 
 #include "drmP.h"
 #include "radeon_drm.h"
 #include "radeon.h"
+#include "radeon_asic.h"
 #include "radeon_mode.h"
 #include "r600d.h"
 #include "atom.h"
 #include "avivod.h"
@@ -39 +41 @@
 #define PM4_UCODE_SIZE 1792
 #define RLC_UCODE_SIZE 768
 #define R700_PFP_UCODE_SIZE 848
 #define R700_PM4_UCODE_SIZE 1360
 #define R700_RLC_UCODE_SIZE 1024
+#define EVERGREEN_PFP_UCODE_SIZE 1120
+#define EVERGREEN_PM4_UCODE_SIZE 1376
+#define EVERGREEN_RLC_UCODE_SIZE 768
+#define CAYMAN_RLC_UCODE_SIZE 1024
@@ -44 +50 @@
 
 /* Firmware Names */
 MODULE_FIRMWARE("radeon/R600_pfp.bin");
 MODULE_FIRMWARE("radeon/R600_me.bin");
@@ -63 +69 @@
 MODULE_FIRMWARE("radeon/RV730_me.bin");
 MODULE_FIRMWARE("radeon/RV710_pfp.bin");
 MODULE_FIRMWARE("radeon/RV710_me.bin");
 MODULE_FIRMWARE("radeon/R600_rlc.bin");
 MODULE_FIRMWARE("radeon/R700_rlc.bin");
+MODULE_FIRMWARE("radeon/CEDAR_pfp.bin");
+MODULE_FIRMWARE("radeon/CEDAR_me.bin");
+MODULE_FIRMWARE("radeon/CEDAR_rlc.bin");
+MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin");
+MODULE_FIRMWARE("radeon/REDWOOD_me.bin");
+MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin");
+MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin");
+MODULE_FIRMWARE("radeon/JUNIPER_me.bin");
+MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
+MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
+MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
+MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
+MODULE_FIRMWARE("radeon/PALM_pfp.bin");
+MODULE_FIRMWARE("radeon/PALM_me.bin");
+MODULE_FIRMWARE("radeon/SUMO_rlc.bin");
+MODULE_FIRMWARE("radeon/SUMO_pfp.bin");
+MODULE_FIRMWARE("radeon/SUMO_me.bin");
+MODULE_FIRMWARE("radeon/SUMO2_pfp.bin");
+MODULE_FIRMWARE("radeon/SUMO2_me.bin");
@@ -68 +93 @@
 
 int r600_debugfs_mc_info_init(struct radeon_device *rdev);
 
 /* r600,rv610,rv630,rv620,rv635,rv670 */
 int r600_mc_wait_for_idle(struct radeon_device *rdev);
 void r600_gpu_init(struct radeon_device *rdev);
 void r600_fini(struct radeon_device *rdev);
+void r600_irq_disable(struct radeon_device *rdev);
+static void r600_pcie_gen2_enable(struct radeon_device *rdev);
+
+/* get temperature in millidegrees */
+int rv6xx_get_temp(struct radeon_device *rdev)
+{
+	u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >>
+		ASIC_T_SHIFT;
+	int actual_temp = temp & 0xff;
+
+	if (temp & 0x100)
+		actual_temp -= 256;
+
+	return actual_temp * 1000;
+}
+
+
+
+
+
+
+bool r600_gui_idle(struct radeon_device *rdev)
+{
+	if (RREG32(GRBM_STATUS) & GUI_ACTIVE)
+		return false;
+	else
+		return true;
+}
 
 /* hpd for digital panel detect/disconnect */
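
A note on the rv6xx_get_temp() hunk above: the raw thermal field is 8 bits of magnitude plus a sign bit, so the driver sign-extends by hand before scaling to millidegrees. A self-contained sketch of that decode, using made-up register values rather than the real RREG32/ASIC_T_* plumbing:

/* Standalone sketch: decode a raw CG_THERMAL_STATUS-style field the way
 * rv6xx_get_temp() does.  The sample inputs are placeholders; the real
 * mask/shift come from the ASIC_T_* definitions in the register headers. */
#include <stdio.h>
#include <stdint.h>

static int decode_temp_millidegrees(uint32_t field)
{
	int actual_temp = field & 0xff;   /* low 8 bits: magnitude */
	if (field & 0x100)                /* bit 8 set: negative reading */
		actual_temp -= 256;       /* manual two's-complement sign extension */
	return actual_temp * 1000;        /* degrees C -> millidegrees C */
}

int main(void)
{
	printf("%d\n", decode_temp_millidegrees(0x03c)); /*  60 C ->  60000 */
	printf("%d\n", decode_temp_millidegrees(0x1fb)); /*  -5 C ->  -5000 */
	return 0;
}
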
@@ -356 +409 @@
 {
 	unsigned i;
 	u32 tmp;
 
-	/* flush hdp cache so updates hit vram */
+	/* flush hdp cache so updates hit vram */
+	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
+	    !(rdev->flags & RADEON_IS_AGP)) {
+		void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
+		u32 tmp;
+
+		/* r7xx hw bug.  write to HDP_DEBUG1 followed by fb read
+		 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL
+		 * This seems to cause problems on some AGP cards. Just use the old
+		 * method for them.
+		 */
+		WREG32(HDP_DEBUG1, 0);
+		tmp = readl((void __iomem *)ptr);
+	} else
 	WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
 
 	WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
@@ -381 +447 @@
 int r600_pcie_gart_init(struct radeon_device *rdev)
 {
 	int r;
 
 	if (rdev->gart.table.vram.robj) {
-		WARN(1, "R600 PCIE GART already initialized.\n");
+		WARN(1, "R600 PCIE GART already initialized\n");
 		return 0;
 	}
 	/* Initialize common gart structure */
 	r = radeon_gart_init(rdev);
@@ -488 +554 @@
 	}
 }
@@ -490 +556 @@
 
 void r600_pcie_gart_fini(struct radeon_device *rdev)
 {
+	radeon_gart_fini(rdev);
 	r600_pcie_gart_disable(rdev);
 	radeon_gart_table_vram_free(rdev);
-	radeon_gart_fini(rdev);
 }
 
 void r600_agp_enable(struct radeon_device *rdev)
@@ -589 +655 @@
 	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
 	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
 	WREG32(MC_VM_FB_LOCATION, tmp);
 	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
 	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
-	WREG32(HDP_NONSURFACE_SIZE, rdev->mc.mc_vram_size | 0x3FF);
+	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
 	if (rdev->flags & RADEON_IS_AGP) {
 		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
 		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
 		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
 	} else {
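
The mc_program hunk above packs the VRAM range into MC_VM_FB_LOCATION in 16 MB units: the end address goes in the upper 16 bits, the start address in the lower 16. A standalone illustration of that packing (the addresses are made up for the example):

/* Standalone sketch of the MC_VM_FB_LOCATION packing used above:
 * both halves are addresses shifted right by 24 (16 MB granularity),
 * end in bits 31:16, start in bits 15:0. */
#include <stdio.h>
#include <stdint.h>

static uint32_t fb_location(uint64_t vram_start, uint64_t vram_end)
{
	uint32_t tmp = ((vram_end >> 24) & 0xFFFF) << 16;
	tmp |= (vram_start >> 24) & 0xFFFF;
	return tmp;
}

int main(void)
{
	/* example: 256 MB of VRAM mapped at GPU address 0 */
	uint64_t start = 0x00000000ULL;
	uint64_t end   = 0x0FFFFFFFULL;   /* inclusive last byte */
	printf("MC_VM_FB_LOCATION = 0x%08X\n", (unsigned)fb_location(start, end)); /* 0x000F0000 */
	return 0;
}
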
@@ -629 +695 @@
  * This function will never fails, worst case are limiting VRAM or GTT.
  *
  * Note: GTT start, end, size should be initialized before calling this
  * function on AGP platform.
  */
-void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
+static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
 {
 	u64 size_bf, size_af;
 
 	if (mc->mc_vram_size > 0xE0000000) {
@@ -663 +729 @@
 		dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
 				mc->mc_vram_size >> 20, mc->vram_start,
 				mc->vram_end, mc->real_vram_size >> 20);
 	} else {
 		u64 base = 0;
-		if (rdev->flags & RADEON_IS_IGP)
-			base = (RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24;
+		if (rdev->flags & RADEON_IS_IGP) {
+			base = RREG32(MC_VM_FB_LOCATION) & 0xFFFF;
+			base <<= 24;
+		}
 		radeon_vram_location(rdev, &rdev->mc, base);
+		rdev->mc.gtt_base_align = 0;
 		radeon_gtt_location(rdev, mc);
 	}
 }
@@ -674 +743 @@
 
 int r600_mc_init(struct radeon_device *rdev)
 {
-	fixed20_12 a;
 	u32 tmp;
 	int chansize, numchan;
 
@@ -704 +772 @@
 		numchan = 8;
 		break;
 	}
 	rdev->mc.vram_width = numchan * chansize;
 	/* Could aper size report 0 ? */
-	rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
-	rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
+	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
+	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
 	/* Setup GPU memory space */
 	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
 	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
 	rdev->mc.visible_vram_size = rdev->mc.aper_size;
-	/* FIXME remove this once we support unmappable VRAM */
-	if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
-		rdev->mc.mc_vram_size = rdev->mc.aper_size;
-		rdev->mc.real_vram_size = rdev->mc.aper_size;
-		}
 	r600_vram_gtt_location(rdev, &rdev->mc);
-	/* FIXME: we should enforce default clock in case GPU is not in
-	 * default setup
-	 */
-	a.full = rfixed_const(100);
-	rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
-	rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
-	if (rdev->flags & RADEON_IS_IGP)
+
+	if (rdev->flags & RADEON_IS_IGP) {
+		rs690_pm_info(rdev);
 		rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
+	}
+	radeon_update_bandwidth_info(rdev);
 	return 0;
 }
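
The block removed above clamped the reported VRAM size to the CPU-visible PCI aperture; newer code handles unmappable VRAM elsewhere. The clamp itself is just a min(), shown here as a tiny standalone sketch with illustrative sizes:

/* Standalone sketch of the clamp the deleted FIXME block performed:
 * without support for unmappable VRAM, usable VRAM cannot exceed the
 * CPU-visible PCI aperture.  Sizes below are illustrative only. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t mc_vram_size = 1024ULL << 20;  /* 1 GB reported by the MC */
	uint64_t aper_size    = 256ULL  << 20;  /* 256 MB BAR aperture */

	if (mc_vram_size > aper_size)
		mc_vram_size = aper_size;       /* clamp, as the old code did */

	printf("usable VRAM: %llu MB\n", (unsigned long long)(mc_vram_size >> 20));
	return 0;
}
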
@@ -731 +792 @@
 
 /* We doesn't check that the GPU really needs a reset we simply do the
@@ -750 +811 @@
 			S_008014_TA2_BUSY(1) | S_008014_TA3_BUSY(1) |
 			S_008014_DB0_BUSY(1) | S_008014_DB1_BUSY(1) |
 			S_008014_DB2_BUSY(1) | S_008014_DB3_BUSY(1) |
 			S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) |
 			S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
-	u32 srbm_reset = 0;
 	u32 tmp;
+
+	if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
+		return 0;
 
 	dev_info(rdev->dev, "GPU softreset \n");
 	dev_info(rdev->dev, "  R_008010_GRBM_STATUS=0x%08X\n",
 		RREG32(R_008010_GRBM_STATUS));
 	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2=0x%08X\n",
@@ -765 +828 @@
 	rv515_mc_stop(rdev, &save);
 	if (r600_mc_wait_for_idle(rdev)) {
 		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
 	}
 	/* Disable CP parsing/prefetching */
-	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(0xff));
+	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
 	/* Check if any of the rendering block is busy and reset it */
 	if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
 	    (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
 		tmp = S_008020_SOFT_RESET_CR(1) |
 			S_008020_SOFT_RESET_DB(1) |
Line 784... Line 847...
784
			S_008020_SOFT_RESET_TA(1) |
847
			S_008020_SOFT_RESET_TA(1) |
785
			S_008020_SOFT_RESET_VC(1) |
848
			S_008020_SOFT_RESET_VC(1) |
786
			S_008020_SOFT_RESET_VGT(1);
849
			S_008020_SOFT_RESET_VGT(1);
787
		dev_info(rdev->dev, "  R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
850
		dev_info(rdev->dev, "  R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
788
		WREG32(R_008020_GRBM_SOFT_RESET, tmp);
851
		WREG32(R_008020_GRBM_SOFT_RESET, tmp);
789
		(void)RREG32(R_008020_GRBM_SOFT_RESET);
852
		RREG32(R_008020_GRBM_SOFT_RESET);
790
		udelay(50);
853
		mdelay(15);
791
		WREG32(R_008020_GRBM_SOFT_RESET, 0);
854
		WREG32(R_008020_GRBM_SOFT_RESET, 0);
792
		(void)RREG32(R_008020_GRBM_SOFT_RESET);
-
 
793
	}
855
	}
794
	/* Reset CP (we always reset CP) */
856
	/* Reset CP (we always reset CP) */
795
	tmp = S_008020_SOFT_RESET_CP(1);
857
	tmp = S_008020_SOFT_RESET_CP(1);
796
	dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
858
	dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
797
	WREG32(R_008020_GRBM_SOFT_RESET, tmp);
859
	WREG32(R_008020_GRBM_SOFT_RESET, tmp);
798
	(void)RREG32(R_008020_GRBM_SOFT_RESET);
860
	RREG32(R_008020_GRBM_SOFT_RESET);
799
	udelay(50);
861
	mdelay(15);
800
	WREG32(R_008020_GRBM_SOFT_RESET, 0);
862
	WREG32(R_008020_GRBM_SOFT_RESET, 0);
801
	(void)RREG32(R_008020_GRBM_SOFT_RESET);
-
 
802
	/* Reset others GPU block if necessary */
-
 
803
	if (G_000E50_RLC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
-
 
804
		srbm_reset |= S_000E60_SOFT_RESET_RLC(1);
-
 
805
	if (G_000E50_GRBM_RQ_PENDING(RREG32(R_000E50_SRBM_STATUS)))
-
 
806
		srbm_reset |= S_000E60_SOFT_RESET_GRBM(1);
-
 
807
	if (G_000E50_HI_RQ_PENDING(RREG32(R_000E50_SRBM_STATUS)))
-
 
808
		srbm_reset |= S_000E60_SOFT_RESET_IH(1);
-
 
809
	if (G_000E50_VMC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
-
 
810
		srbm_reset |= S_000E60_SOFT_RESET_VMC(1);
-
 
811
	if (G_000E50_MCB_BUSY(RREG32(R_000E50_SRBM_STATUS)))
-
 
812
		srbm_reset |= S_000E60_SOFT_RESET_MC(1);
-
 
813
	if (G_000E50_MCDZ_BUSY(RREG32(R_000E50_SRBM_STATUS)))
-
 
814
		srbm_reset |= S_000E60_SOFT_RESET_MC(1);
-
 
815
	if (G_000E50_MCDY_BUSY(RREG32(R_000E50_SRBM_STATUS)))
-
 
816
		srbm_reset |= S_000E60_SOFT_RESET_MC(1);
-
 
817
	if (G_000E50_MCDX_BUSY(RREG32(R_000E50_SRBM_STATUS)))
-
 
818
		srbm_reset |= S_000E60_SOFT_RESET_MC(1);
-
 
819
	if (G_000E50_MCDW_BUSY(RREG32(R_000E50_SRBM_STATUS)))
-
 
820
		srbm_reset |= S_000E60_SOFT_RESET_MC(1);
-
 
821
	if (G_000E50_RLC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
-
 
822
		srbm_reset |= S_000E60_SOFT_RESET_RLC(1);
-
 
823
	if (G_000E50_SEM_BUSY(RREG32(R_000E50_SRBM_STATUS)))
-
 
824
		srbm_reset |= S_000E60_SOFT_RESET_SEM(1);
-
 
825
	if (G_000E50_BIF_BUSY(RREG32(R_000E50_SRBM_STATUS)))
-
 
826
		srbm_reset |= S_000E60_SOFT_RESET_BIF(1);
-
 
827
	dev_info(rdev->dev, "  R_000E60_SRBM_SOFT_RESET=0x%08X\n", srbm_reset);
-
 
828
	WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset);
-
 
829
	(void)RREG32(R_000E60_SRBM_SOFT_RESET);
-
 
830
	udelay(50);
-
 
831
	WREG32(R_000E60_SRBM_SOFT_RESET, 0);
-
 
832
	(void)RREG32(R_000E60_SRBM_SOFT_RESET);
-
 
833
	WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset);
-
 
834
	(void)RREG32(R_000E60_SRBM_SOFT_RESET);
-
 
835
	udelay(50);
-
 
836
	WREG32(R_000E60_SRBM_SOFT_RESET, 0);
-
 
837
	(void)RREG32(R_000E60_SRBM_SOFT_RESET);
-
 
838
	/* Wait a little for things to settle down */
863
	/* Wait a little for things to settle down */
839
	udelay(50);
864
	mdelay(1);
840
	dev_info(rdev->dev, "  R_008010_GRBM_STATUS=0x%08X\n",
865
	dev_info(rdev->dev, "  R_008010_GRBM_STATUS=0x%08X\n",
841
		RREG32(R_008010_GRBM_STATUS));
866
		RREG32(R_008010_GRBM_STATUS));
842
	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2=0x%08X\n",
867
	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2=0x%08X\n",
843
		RREG32(R_008014_GRBM_STATUS2));
868
		RREG32(R_008014_GRBM_STATUS2));
844
	dev_info(rdev->dev, "  R_000E50_SRBM_STATUS=0x%08X\n",
869
	dev_info(rdev->dev, "  R_000E50_SRBM_STATUS=0x%08X\n",
845
		RREG32(R_000E50_SRBM_STATUS));
870
		RREG32(R_000E50_SRBM_STATUS));
846
	/* After reset we need to reinit the asic as GPU often endup in an
-
 
847
	 * incoherent state.
-
 
848
	 */
-
 
849
	atom_asic_init(rdev->mode_info.atom_context);
-
 
850
	rv515_mc_resume(rdev, &save);
871
	rv515_mc_resume(rdev, &save);
851
	return 0;
872
	return 0;
852
}
873
}
Line -... Line 874...
-
 
874
 
-
 
875
bool r600_gpu_is_lockup(struct radeon_device *rdev)
-
 
876
{
-
 
877
	u32 srbm_status;
-
 
878
	u32 grbm_status;
-
 
879
	u32 grbm_status2;
-
 
880
	struct r100_gpu_lockup *lockup;
-
 
881
	int r;
-
 
882
 
-
 
883
	if (rdev->family >= CHIP_RV770)
-
 
884
		lockup = &rdev->config.rv770.lockup;
-
 
885
	else
-
 
886
		lockup = &rdev->config.r600.lockup;
-
 
887
 
-
 
888
	srbm_status = RREG32(R_000E50_SRBM_STATUS);
-
 
889
	grbm_status = RREG32(R_008010_GRBM_STATUS);
-
 
890
	grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
-
 
891
	if (!G_008010_GUI_ACTIVE(grbm_status)) {
-
 
892
		r100_gpu_lockup_update(lockup, &rdev->cp);
-
 
893
		return false;
-
 
894
	}
-
 
895
	/* force CP activities */
-
 
896
	r = radeon_ring_lock(rdev, 2);
-
 
897
	if (!r) {
-
 
898
		/* PACKET2 NOP */
-
 
899
		radeon_ring_write(rdev, 0x80000000);
-
 
900
		radeon_ring_write(rdev, 0x80000000);
-
 
901
		radeon_ring_unlock_commit(rdev);
-
 
902
	}
-
 
903
	rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
-
 
904
	return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
-
 
905
}
853
 
906
 
@@ -854 +907 @@
-int r600_gpu_reset(struct radeon_device *rdev)
+int r600_asic_reset(struct radeon_device *rdev)
 {
 	return r600_gpu_soft_reset(rdev);
 }
Line 1093... Line 1146...
1093
		break;
1146
		break;
1094
	}
1147
	}
1095
	rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes;
1148
	rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes;
1096
	rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
1149
	rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
1097
	tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
1150
	tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
1098
	tiling_config |= GROUP_SIZE(0);
1151
	tiling_config |= GROUP_SIZE((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
-
 
1152
	if ((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT)
-
 
1153
		rdev->config.r600.tiling_group_size = 512;
-
 
1154
	else
1099
	rdev->config.r600.tiling_group_size = 256;
1155
	rdev->config.r600.tiling_group_size = 256;
1100
	tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
1156
	tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
1101
	if (tmp > 3) {
1157
	if (tmp > 3) {
1102
		tiling_config |= ROW_TILING(3);
1158
		tiling_config |= ROW_TILING(3);
1103
		tiling_config |= SAMPLE_SPLIT(3);
1159
		tiling_config |= SAMPLE_SPLIT(3);
@@ -1120 +1176 @@
 	backend_map = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes,
 							(R6XX_MAX_BACKENDS -
 							 r600_count_pipe_bits((cc_rb_backend_disable &
 									       R6XX_MAX_BACKENDS_MASK) >> 16)),
 							(cc_rb_backend_disable >> 16));
-
+	rdev->config.r600.tile_config = tiling_config;
 	tiling_config |= BACKEND_MAP(backend_map);
 	WREG32(GB_TILING_CONFIG, tiling_config);
 	WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
 	WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);
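
In the hunk above, r600_count_pipe_bits() is a population count over the backend-disable mask, and the enabled-backend count is the maximum minus that. A standalone sketch of the same computation (the maximum and the sample mask are illustrative, not taken from a real BACKEND_DISABLE readout):

/* Standalone sketch of the backend-count computation above:
 * count bits set in the disable mask and subtract from the maximum.
 * MAX_BACKENDS and the sample mask are illustrative values. */
#include <stdio.h>
#include <stdint.h>

#define MAX_BACKENDS 8

static unsigned count_bits(uint32_t v)
{
	unsigned n = 0;
	while (v) {
		n += v & 1;
		v >>= 1;
	}
	return n;
}

int main(void)
{
	uint32_t backend_disable_mask = 0x0C;  /* two backends fused off (example) */
	unsigned enabled = MAX_BACKENDS - count_bits(backend_disable_mask);
	printf("enabled render backends: %u\n", enabled);  /* prints 6 */
	return 0;
}
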
Line 1130... Line 1186...
1130
 
1186
 
1131
	/* Setup pipes */
1187
	/* Setup pipes */
1132
	WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
1188
	WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
-
 
1189
	WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
Line 1133... Line 1190...
1133
	WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
1190
	WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
1134
 
1191
 
1135
	tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
1192
	tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
Line 1402... Line 1459...
1402
/*
1459
/*
1403
 * CP & Ring
1460
 * CP & Ring
1404
 */
1461
 */
1405
void r600_cp_stop(struct radeon_device *rdev)
1462
void r600_cp_stop(struct radeon_device *rdev)
1406
{
1463
{
-
 
1464
//   radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
1407
	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
1465
	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
-
 
1466
	WREG32(SCRATCH_UMSK, 0);
1408
}
1467
}
Line 1409... Line 1468...
1409
 
1468
 
1410
int r600_init_microcode(struct radeon_device *rdev)
1469
int r600_init_microcode(struct radeon_device *rdev)
1411
{
1470
{
Line 1466... Line 1525...
1466
		break;
1525
		break;
1467
	case CHIP_RV710:
1526
	case CHIP_RV710:
1468
		chip_name = "RV710";
1527
		chip_name = "RV710";
1469
		rlc_chip_name = "R700";
1528
		rlc_chip_name = "R700";
1470
		break;
1529
		break;
-
 
1530
	case CHIP_CEDAR:
-
 
1531
		chip_name = "CEDAR";
-
 
1532
		rlc_chip_name = "CEDAR";
-
 
1533
		break;
-
 
1534
	case CHIP_REDWOOD:
-
 
1535
		chip_name = "REDWOOD";
-
 
1536
		rlc_chip_name = "REDWOOD";
-
 
1537
		break;
-
 
1538
	case CHIP_JUNIPER:
-
 
1539
		chip_name = "JUNIPER";
-
 
1540
		rlc_chip_name = "JUNIPER";
-
 
1541
		break;
-
 
1542
	case CHIP_CYPRESS:
-
 
1543
	case CHIP_HEMLOCK:
-
 
1544
		chip_name = "CYPRESS";
-
 
1545
		rlc_chip_name = "CYPRESS";
-
 
1546
		break;
-
 
1547
	case CHIP_PALM:
-
 
1548
		chip_name = "PALM";
-
 
1549
		rlc_chip_name = "SUMO";
-
 
1550
		break;
1471
	default: BUG();
1551
	default: BUG();
1472
	}
1552
	}
Line 1473... Line 1553...
1473
 
1553
 
-
 
1554
	if (rdev->family >= CHIP_CEDAR) {
-
 
1555
		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
-
 
1556
		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
-
 
1557
		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
1474
	if (rdev->family >= CHIP_RV770) {
1558
	} else if (rdev->family >= CHIP_RV770) {
1475
		pfp_req_size = R700_PFP_UCODE_SIZE * 4;
1559
		pfp_req_size = R700_PFP_UCODE_SIZE * 4;
1476
		me_req_size = R700_PM4_UCODE_SIZE * 4;
1560
		me_req_size = R700_PM4_UCODE_SIZE * 4;
1477
		rlc_req_size = R700_RLC_UCODE_SIZE * 4;
1561
		rlc_req_size = R700_RLC_UCODE_SIZE * 4;
1478
	} else {
1562
	} else {
Line 1543... Line 1627...
1543
	if (!rdev->me_fw || !rdev->pfp_fw)
1627
	if (!rdev->me_fw || !rdev->pfp_fw)
1544
		return -EINVAL;
1628
		return -EINVAL;
Line 1545... Line 1629...
1545
 
1629
 
Line -... Line 1630...
-
 
1630
	r600_cp_stop(rdev);
-
 
1631
 
-
 
1632
	WREG32(CP_RB_CNTL,
-
 
1633
#ifdef __BIG_ENDIAN
1546
	r600_cp_stop(rdev);
1634
	       BUF_SWAP_32BIT |
Line 1547... Line 1635...
1547
 
1635
#endif
1548
	WREG32(CP_RB_CNTL, RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
1636
	       RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
1549
 
1637
 
1550
	/* Reset cp */
1638
	/* Reset cp */
Line 1583... Line 1671...
1583
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
1671
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
1584
		return r;
1672
		return r;
1585
	}
1673
	}
1586
	radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
1674
	radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
1587
	radeon_ring_write(rdev, 0x1);
1675
	radeon_ring_write(rdev, 0x1);
1588
	if (rdev->family < CHIP_RV770) {
1676
	if (rdev->family >= CHIP_RV770) {
1589
		radeon_ring_write(rdev, 0x3);
-
 
1590
		radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
-
 
1591
	} else {
-
 
1592
		radeon_ring_write(rdev, 0x0);
1677
		radeon_ring_write(rdev, 0x0);
1593
		radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
1678
		radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
-
 
1679
	} else {
-
 
1680
		radeon_ring_write(rdev, 0x3);
-
 
1681
		radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
1594
	}
1682
	}
1595
	radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
1683
	radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
1596
	radeon_ring_write(rdev, 0);
1684
	radeon_ring_write(rdev, 0);
1597
	radeon_ring_write(rdev, 0);
1685
	radeon_ring_write(rdev, 0);
1598
	radeon_ring_unlock_commit(rdev);
1686
	radeon_ring_unlock_commit(rdev);
Line 1614... Line 1702...
1614
	mdelay(15);
1702
	mdelay(15);
1615
	WREG32(GRBM_SOFT_RESET, 0);
1703
	WREG32(GRBM_SOFT_RESET, 0);
Line 1616... Line 1704...
1616
 
1704
 
1617
	/* Set ring buffer size */
1705
	/* Set ring buffer size */
1618
	rb_bufsz = drm_order(rdev->cp.ring_size / 8);
1706
	rb_bufsz = drm_order(rdev->cp.ring_size / 8);
1619
	tmp = RB_NO_UPDATE | (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
1707
	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
1620
#ifdef __BIG_ENDIAN
1708
#ifdef __BIG_ENDIAN
1621
	tmp |= BUF_SWAP_32BIT;
1709
	tmp |= BUF_SWAP_32BIT;
1622
#endif
1710
#endif
1623
	WREG32(CP_RB_CNTL, tmp);
1711
	WREG32(CP_RB_CNTL, tmp);
Line 1628... Line 1716...
1628
 
1716
 
1629
	/* Initialize the ring buffer's read and write pointers */
1717
	/* Initialize the ring buffer's read and write pointers */
1630
	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
1718
	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
1631
	WREG32(CP_RB_RPTR_WR, 0);
1719
	WREG32(CP_RB_RPTR_WR, 0);
-
 
1720
	WREG32(CP_RB_WPTR, 0);
-
 
1721
 
1632
	WREG32(CP_RB_WPTR, 0);
1722
	/* set the wb address whether it's enabled or not */
-
 
1723
	WREG32(CP_RB_RPTR_ADDR,
-
 
1724
#ifdef __BIG_ENDIAN
-
 
1725
	       RB_RPTR_SWAP(2) |
-
 
1726
#endif
1633
	WREG32(CP_RB_RPTR_ADDR, rdev->cp.gpu_addr & 0xFFFFFFFF);
1727
	       ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
-
 
1728
	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
-
 
1729
	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
-
 
1730
 
-
 
1731
	if (rdev->wb.enabled)
-
 
1732
		WREG32(SCRATCH_UMSK, 0xff);
-
 
1733
	else {
-
 
1734
		tmp |= RB_NO_UPDATE;
-
 
1735
		WREG32(SCRATCH_UMSK, 0);
-
 
1736
	}
1634
	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->cp.gpu_addr));
1737
 
1635
	mdelay(1);
1738
	mdelay(1);
Line 1636... Line 1739...
1636
	WREG32(CP_RB_CNTL, tmp);
1739
	WREG32(CP_RB_CNTL, tmp);
1637
 
1740
 
Line 1666... Line 1769...
1666
	ring_size = (1 << (rb_bufsz + 1)) * 4;
1769
	ring_size = (1 << (rb_bufsz + 1)) * 4;
1667
	rdev->cp.ring_size = ring_size;
1770
	rdev->cp.ring_size = ring_size;
1668
	rdev->cp.align_mask = 16 - 1;
1771
	rdev->cp.align_mask = 16 - 1;
1669
}
1772
}
Line -... Line 1773...
-
 
1773
 
-
 
1774
void r600_cp_fini(struct radeon_device *rdev)
-
 
1775
{
-
 
1776
	r600_cp_stop(rdev);
-
 
1777
	radeon_ring_fini(rdev);
-
 
1778
}
Line 1670... Line 1779...
1670
 
1779
 
1671
 
1780
 
1672
/*
1781
/*
1673
 * GPU scratch registers helpers function.
1782
 * GPU scratch registers helpers function.
1674
 */
1783
 */
1675
void r600_scratch_init(struct radeon_device *rdev)
1784
void r600_scratch_init(struct radeon_device *rdev)
Line 1676... Line 1785...
1676
{
1785
{
-
 
1786
	int i;
1677
	int i;
1787
 
1678
 
1788
	rdev->scratch.num_reg = 7;
1679
	rdev->scratch.num_reg = 7;
1789
	rdev->scratch.reg_base = SCRATCH_REG0;
1680
	for (i = 0; i < rdev->scratch.num_reg; i++) {
1790
	for (i = 0; i < rdev->scratch.num_reg; i++) {
1681
		rdev->scratch.free[i] = true;
1791
		rdev->scratch.free[i] = true;
Line 1682... Line 1792...
1682
		rdev->scratch.reg[i] = SCRATCH_REG0 + (i * 4);
1792
		rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
1683
	}
1793
	}
Line 1720... Line 1830...
1720
		r = -EINVAL;
1830
		r = -EINVAL;
1721
	}
1831
	}
1722
	radeon_scratch_free(rdev, scratch);
1832
	radeon_scratch_free(rdev, scratch);
1723
	return r;
1833
	return r;
1724
}
1834
}
-
 
1835
 
1725
void r600_fence_ring_emit(struct radeon_device *rdev,
1836
void r600_fence_ring_emit(struct radeon_device *rdev,
1726
			  struct radeon_fence *fence)
1837
			  struct radeon_fence *fence)
1727
{
1838
{
-
 
1839
	if (rdev->wb.use_event) {
-
 
1840
		u64 addr = rdev->wb.gpu_addr + R600_WB_EVENT_OFFSET +
-
 
1841
			(u64)(rdev->fence_drv.scratch_reg - rdev->scratch.reg_base);
1728
	/* Also consider EVENT_WRITE_EOP.  it handles the interrupts + timestamps + events */
1842
		/* EVENT_WRITE_EOP - flush caches, send int */
-
 
1843
		radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
-
 
1844
		radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
-
 
1845
		radeon_ring_write(rdev, addr & 0xffffffff);
-
 
1846
		radeon_ring_write(rdev, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
-
 
1847
		radeon_ring_write(rdev, fence->seq);
-
 
1848
		radeon_ring_write(rdev, 0);
1729
 
1849
	} else {
1730
	radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
1850
	radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
1731
	radeon_ring_write(rdev, CACHE_FLUSH_AND_INV_EVENT);
1851
		radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0));
1732
	/* wait for 3D idle clean */
1852
	/* wait for 3D idle clean */
1733
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
1853
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
1734
	radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
1854
	radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
1735
	radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
1855
	radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
1736
	/* Emit fence sequence & fire IRQ */
1856
	/* Emit fence sequence & fire IRQ */
Line 1739... Line 1859...
1739
	radeon_ring_write(rdev, fence->seq);
1859
	radeon_ring_write(rdev, fence->seq);
1740
	/* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
1860
	/* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
1741
	radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
1861
	radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
1742
	radeon_ring_write(rdev, RB_INT_STAT);
1862
	radeon_ring_write(rdev, RB_INT_STAT);
1743
}
1863
	}
-
 
1864
}
-
 
1865
 
-
 
1866
 
1744
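
The fence emission above builds PM4 type-3 packets: a 32-bit header carrying the packet type, payload length and opcode, followed by the payload dwords (the EVENT_WRITE_EOP write above carries five payload words, hence the count of 4). A standalone sketch of that header layout, following the commonly documented R600 PM4 type-3 format; the opcode value is only an example:

/* Standalone sketch of the type-3 command header used by the ring writes
 * above.  Assumed layout per the widely documented R600 PM4 format:
 * bits 31:30 = packet type (3), 29:16 = payload dword count minus one,
 * 15:8 = opcode.  The opcode below is illustrative. */
#include <stdio.h>
#include <stdint.h>

static uint32_t packet3(uint8_t opcode, uint16_t count_minus_one)
{
	return (3u << 30) |
	       ((uint32_t)(count_minus_one & 0x3FFF) << 16) |
	       ((uint32_t)opcode << 8);
}

int main(void)
{
	/* e.g. a fence packet with five payload dwords -> count field of 4 */
	printf("header = 0x%08X\n", packet3(0x47 /* example opcode */, 4));
	return 0;
}
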
int r600_set_surface_reg(struct radeon_device *rdev, int reg,
1867
int r600_set_surface_reg(struct radeon_device *rdev, int reg,
1745
			 uint32_t tiling_flags, uint32_t pitch,
1868
			 uint32_t tiling_flags, uint32_t pitch,
1746
			 uint32_t offset, uint32_t obj_size)
1869
			 uint32_t offset, uint32_t obj_size)
1747
{
1870
{
1748
	/* FIXME: implement */
1871
	/* FIXME: implement */
Line 1752... Line 1875...
1752
void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
1875
void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
1753
{
1876
{
1754
	/* FIXME: implement */
1877
	/* FIXME: implement */
1755
}
1878
}
Line 1756... Line -...
1756
 
-
 
1757
 
-
 
1758
bool r600_card_posted(struct radeon_device *rdev)
-
 
1759
{
-
 
1760
	uint32_t reg;
-
 
1761
 
-
 
1762
	/* first check CRTCs */
-
 
1763
	reg = RREG32(D1CRTC_CONTROL) |
-
 
1764
		RREG32(D2CRTC_CONTROL);
-
 
1765
	if (reg & CRTC_EN)
-
 
1766
		return true;
-
 
1767
 
-
 
1768
	/* then check MEM_SIZE, in case the crtcs are off */
-
 
1769
	if (RREG32(CONFIG_MEMSIZE))
-
 
1770
		return true;
-
 
1771
 
-
 
1772
	return false;
-
 
1773
}
-
 
1774
 
1879
 
1775
int r600_startup(struct radeon_device *rdev)
1880
int r600_startup(struct radeon_device *rdev)
1776
{
1881
{
Line -... Line 1882...
-
 
1882
	int r;
-
 
1883
 
-
 
1884
	/* enable pcie gen2 link */
1777
	int r;
1885
	r600_pcie_gen2_enable(rdev);
1778
 
1886
 
1779
	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
1887
	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
1780
		r = r600_init_microcode(rdev);
1888
		r = r600_init_microcode(rdev);
1781
		if (r) {
1889
		if (r) {
Line 1801... Line 1909...
1801
	if (r)
1909
	if (r)
1802
		return r;
1910
		return r;
1803
	r = r600_cp_resume(rdev);
1911
	r = r600_cp_resume(rdev);
1804
	if (r)
1912
	if (r)
1805
		return r;
1913
		return r;
1806
	/* write back buffer are not vital so don't worry about failure */
-
 
1807
//	r600_wb_enable(rdev);
-
 
-
 
1914
 
1808
	return 0;
1915
	return 0;
1809
}
1916
}
Line 1810... Line 1917...
1810
 
1917
 
1811
void r600_vga_set_state(struct radeon_device *rdev, bool state)
1918
void r600_vga_set_state(struct radeon_device *rdev, bool state)
Line 1834... Line 1941...
1834
 */
1941
 */
1835
int r600_init(struct radeon_device *rdev)
1942
int r600_init(struct radeon_device *rdev)
1836
{
1943
{
1837
	int r;
1944
	int r;
Line 1838... Line -...
1838
 
-
 
1839
	r = radeon_dummy_page_init(rdev);
-
 
1840
	if (r)
-
 
1841
		return r;
1945
 
1842
	if (r600_debugfs_mc_info_init(rdev)) {
1946
	if (r600_debugfs_mc_info_init(rdev)) {
1843
		DRM_ERROR("Failed to register debugfs file for mc !\n");
1947
		DRM_ERROR("Failed to register debugfs file for mc !\n");
1844
	}
1948
	}
1845
	/* This don't do much */
1949
	/* This don't do much */
1846
	r = radeon_gem_init(rdev);
1950
//   r = radeon_gem_init(rdev);
1847
	if (r)
1951
//   if (r)
1848
		return r;
1952
//       return r;
1849
	/* Read BIOS */
1953
	/* Read BIOS */
1850
	if (!radeon_get_bios(rdev)) {
1954
	if (!radeon_get_bios(rdev)) {
1851
		if (ASIC_IS_AVIVO(rdev))
1955
		if (ASIC_IS_AVIVO(rdev))
1852
			return -EINVAL;
1956
			return -EINVAL;
Line 1858... Line 1962...
1858
	}
1962
	}
1859
	r = radeon_atombios_init(rdev);
1963
	r = radeon_atombios_init(rdev);
1860
	if (r)
1964
	if (r)
1861
		return r;
1965
		return r;
1862
	/* Post card if necessary */
1966
	/* Post card if necessary */
1863
	if (!r600_card_posted(rdev)) {
1967
	if (!radeon_card_posted(rdev)) {
1864
		if (!rdev->bios) {
1968
		if (!rdev->bios) {
1865
			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
1969
			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
1866
			return -EINVAL;
1970
			return -EINVAL;
1867
		}
1971
		}
1868
		DRM_INFO("GPU not posted. posting now...\n");
1972
		DRM_INFO("GPU not posted. posting now...\n");
Line 1872... Line 1976...
1872
	r600_scratch_init(rdev);
1976
	r600_scratch_init(rdev);
1873
	/* Initialize surface registers */
1977
	/* Initialize surface registers */
1874
	radeon_surface_init(rdev);
1978
	radeon_surface_init(rdev);
1875
	/* Initialize clocks */
1979
	/* Initialize clocks */
1876
	radeon_get_clock_info(rdev->ddev);
1980
	radeon_get_clock_info(rdev->ddev);
1877
	r = radeon_clocks_init(rdev);
-
 
1878
	if (r)
-
 
1879
		return r;
-
 
1880
	/* Initialize power management */
-
 
1881
	radeon_pm_init(rdev);
-
 
1882
	/* Fence driver */
1981
	/* Fence driver */
-
 
1982
//	r = radeon_fence_driver_init(rdev);
-
 
1983
//	if (r)
-
 
1984
//		return r;
1883
	if (rdev->flags & RADEON_IS_AGP) {
1985
	if (rdev->flags & RADEON_IS_AGP) {
1884
		r = radeon_agp_init(rdev);
1986
		r = radeon_agp_init(rdev);
1885
		if (r)
1987
		if (r)
1886
			radeon_agp_disable(rdev);
1988
			radeon_agp_disable(rdev);
1887
	}
1989
	}
Line 1930... Line 2032...
1930
//		}
2032
//		}
1931
	}
2033
	}
1932
	return 0;
2034
	return 0;
1933
}
2035
}
Line -... Line 2036...
-
 
2036
 
-
 
2037
static void r600_disable_interrupt_state(struct radeon_device *rdev)
-
 
2038
{
-
 
2039
	u32 tmp;
-
 
2040
 
-
 
2041
	WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
-
 
2042
	WREG32(GRBM_INT_CNTL, 0);
-
 
2043
	WREG32(DxMODE_INT_MASK, 0);
-
 
2044
	WREG32(D1GRPH_INTERRUPT_CONTROL, 0);
-
 
2045
	WREG32(D2GRPH_INTERRUPT_CONTROL, 0);
-
 
2046
	if (ASIC_IS_DCE3(rdev)) {
-
 
2047
		WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0);
-
 
2048
		WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0);
-
 
2049
		tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
-
 
2050
		WREG32(DC_HPD1_INT_CONTROL, tmp);
-
 
2051
		tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
-
 
2052
		WREG32(DC_HPD2_INT_CONTROL, tmp);
-
 
2053
		tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
-
 
2054
		WREG32(DC_HPD3_INT_CONTROL, tmp);
-
 
2055
		tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
-
 
2056
		WREG32(DC_HPD4_INT_CONTROL, tmp);
-
 
2057
		if (ASIC_IS_DCE32(rdev)) {
-
 
2058
			tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
-
 
2059
			WREG32(DC_HPD5_INT_CONTROL, tmp);
-
 
2060
			tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
-
 
2061
			WREG32(DC_HPD6_INT_CONTROL, tmp);
-
 
2062
		}
-
 
2063
	} else {
-
 
2064
		WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
-
 
2065
		WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
-
 
2066
		tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
-
 
2067
		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
-
 
2068
		tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
-
 
2069
		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
-
 
2070
		tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
-
 
2071
		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
-
 
2072
	}
Line 2005... Line 2144...
2005
 * http://bugzilla.kernel.org/show_bug.cgi?id=15186 to avoid this we
2144
 * http://bugzilla.kernel.org/show_bug.cgi?id=15186 to avoid this we
2006
 * directly perform HDP flush by writing register through MMIO.
2145
 * directly perform HDP flush by writing register through MMIO.
2007
 */
2146
 */
2008
void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
2147
void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
2009
{
2148
{
-
 
2149
	/* r7xx hw bug.  write to HDP_DEBUG1 followed by fb read
-
 
2150
	 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL.
-
 
2151
	 * This seems to cause problems on some AGP cards. Just use the old
-
 
2152
	 * method for them.
-
 
2153
	 */
-
 
2154
	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
-
 
2155
	    rdev->vram_scratch.ptr && !(rdev->flags & RADEON_IS_AGP)) {
-
 
2156
		void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
-
 
2157
		u32 tmp;
-
 
2158
 
-
 
2159
		WREG32(HDP_DEBUG1, 0);
-
 
2160
		tmp = readl((void __iomem *)ptr);
-
 
2161
	} else
2010
	WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
2162
	WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
2011
}
2163
}
-
 
2164
 
-
 
2165
void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes)
-
 
2166
{
-
 
2167
	u32 link_width_cntl, mask, target_reg;
-
 
2168
 
-
 
2169
	if (rdev->flags & RADEON_IS_IGP)
-
 
2170
		return;
-
 
2171
 
-
 
2172
	if (!(rdev->flags & RADEON_IS_PCIE))
-
 
2173
		return;
-
 
2174
 
-
 
2175
	/* x2 cards have a special sequence */
-
 
2176
	if (ASIC_IS_X2(rdev))
-
 
2177
		return;
-
 
2178
 
-
 
2179
	/* FIXME wait for idle */
-
 
2180
 
-
 
2181
	switch (lanes) {
-
 
2182
	case 0:
-
 
2183
		mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
-
 
2184
		break;
-
 
2185
	case 1:
-
 
2186
		mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
-
 
2187
		break;
-
 
2188
	case 2:
-
 
2189
		mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
-
 
2190
		break;
-
 
2191
	case 4:
-
 
2192
		mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
-
 
2193
		break;
-
 
2194
	case 8:
-
 
2195
		mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
-
 
2196
		break;
-
 
2197
	case 12:
-
 
2198
		mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
-
 
2199
		break;
-
 
2200
	case 16:
-
 
2201
	default:
-
 
2202
		mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
-
 
2203
		break;
-
 
2204
	}
-
 
2205
 
-
 
2206
	link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
-
 
2207
 
-
 
2208
	if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) ==
-
 
2209
	    (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT))
-
 
2210
		return;
-
 
2211
 
-
 
2212
	if (link_width_cntl & R600_PCIE_LC_UPCONFIGURE_DIS)
-
 
2213
		return;
-
 
2214
 
-
 
2215
	link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK |
-
 
2216
			     RADEON_PCIE_LC_RECONFIG_NOW |
-
 
2217
			     R600_PCIE_LC_RENEGOTIATE_EN |
-
 
2218
			     R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE);
-
 
2219
	link_width_cntl |= mask;
-
 
2220
 
-
 
2221
	WREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
-
 
2222
 
-
 
2223
        /* some northbridges can renegotiate the link rather than requiring                                  
-
 
2224
         * a complete re-config.                                                                             
-
 
2225
         * e.g., AMD 780/790 northbridges (pci ids: 0x5956, 0x5957, 0x5958, etc.)                            
-
 
2226
         */
-
 
2227
        if (link_width_cntl & R600_PCIE_LC_RENEGOTIATION_SUPPORT)
-
 
2228
		link_width_cntl |= R600_PCIE_LC_RENEGOTIATE_EN | R600_PCIE_LC_UPCONFIGURE_SUPPORT;
-
 
2229
        else
-
 
2230
		link_width_cntl |= R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE;
-
 
2231
 
-
 
2232
	WREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl |
-
 
2233
						       RADEON_PCIE_LC_RECONFIG_NOW));
-
 
2234
 
-
 
2235
        if (rdev->family >= CHIP_RV770)
-
 
2236
		target_reg = R700_TARGET_AND_CURRENT_PROFILE_INDEX;
-
 
2237
        else
-
 
2238
		target_reg = R600_TARGET_AND_CURRENT_PROFILE_INDEX;
-
 
2239
 
-
 
2240
        /* wait for lane set to complete */
-
 
2241
        link_width_cntl = RREG32(target_reg);
-
 
2242
        while (link_width_cntl == 0xffffffff)
-
 
2243
		link_width_cntl = RREG32(target_reg);
-
 
2244
 
-
 
2245
}
-
 
2246
 
-
 
2247
int r600_get_pcie_lanes(struct radeon_device *rdev)
-
 
2248
{
-
 
2249
	u32 link_width_cntl;
-
 
2250
 
-
 
2251
	if (rdev->flags & RADEON_IS_IGP)
-
 
2252
		return 0;
-
 
2253
 
-
 
2254
	if (!(rdev->flags & RADEON_IS_PCIE))
-
 
2255
		return 0;
-
 
2256
 
-
 
2257
	/* x2 cards have a special sequence */
-
 
2258
	if (ASIC_IS_X2(rdev))
-
 
2259
		return 0;
-
 
2260
 
-
 
2261
	/* FIXME wait for idle */
-
 
2262
 
-
 
2263
	link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
-
 
2264
 
-
 
2265
	switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
-
 
2266
	case RADEON_PCIE_LC_LINK_WIDTH_X0:
-
 
2267
		return 0;
-
 
2268
	case RADEON_PCIE_LC_LINK_WIDTH_X1:
-
 
2269
		return 1;
-
 
2270
	case RADEON_PCIE_LC_LINK_WIDTH_X2:
-
 
2271
		return 2;
-
 
2272
	case RADEON_PCIE_LC_LINK_WIDTH_X4:
-
 
2273
		return 4;
-
 
2274
	case RADEON_PCIE_LC_LINK_WIDTH_X8:
-
 
2275
		return 8;
-
 
2276
	case RADEON_PCIE_LC_LINK_WIDTH_X16:
-
 
2277
	default:
-
 
2278
		return 16;
-
 
2279
	}
-
 
2280
}
-
 
2281
 
-
 
2282
static void r600_pcie_gen2_enable(struct radeon_device *rdev)
-
 
2283
{
-
 
2284
	u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp;
-
 
2285
	u16 link_cntl2;
-
 
2286
 
-
 
2287
	if (radeon_pcie_gen2 == 0)
-
 
2288
		return;
-
 
2289
 
-
 
2290
	if (rdev->flags & RADEON_IS_IGP)
-
 
2291
		return;
-
 
2292
 
-
 
2293
	if (!(rdev->flags & RADEON_IS_PCIE))
-
 
2294
		return;
-
 
2295
 
-
 
2296
	/* x2 cards have a special sequence */
-
 
2297
	if (ASIC_IS_X2(rdev))
-
 
2298
		return;
-
 
2299
 
-
 
2300
	/* only RV6xx+ chips are supported */
-
 
2301
	if (rdev->family <= CHIP_R600)
-
 
2302
		return;
-
 
2303
 
-
 
2304
	/* 55 nm r6xx asics */
-
 
2305
	if ((rdev->family == CHIP_RV670) ||
-
 
2306
	    (rdev->family == CHIP_RV620) ||
-
 
2307
	    (rdev->family == CHIP_RV635)) {
-
 
2308
		/* advertise upconfig capability */
-
 
2309
		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
-
 
2310
		link_width_cntl &= ~LC_UPCONFIGURE_DIS;
-
 
2311
		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
-
 
2312
		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
-
 
2313
		if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) {
-
 
2314
			lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
-
 
2315
			link_width_cntl &= ~(LC_LINK_WIDTH_MASK |
-
 
2316
					     LC_RECONFIG_ARC_MISSING_ESCAPE);
-
 
2317
			link_width_cntl |= lanes | LC_RECONFIG_NOW | LC_RENEGOTIATE_EN;
-
 
2318
			WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
-
 
2319
		} else {
-
 
2320
			link_width_cntl |= LC_UPCONFIGURE_DIS;
-
 
2321
			WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
-
 
2322
		}
-
 
2323
	}
-
 
2324
 
-
 
2325
	speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
-
 
2326
	if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
-
 
2327
	    (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
-
 
2328
 
-
 
2329
		/* 55 nm r6xx asics */
-
 
2330
		if ((rdev->family == CHIP_RV670) ||
-
 
2331
		    (rdev->family == CHIP_RV620) ||
-
 
2332
		    (rdev->family == CHIP_RV635)) {
-
 
2333
			WREG32(MM_CFGREGS_CNTL, 0x8);
-
 
2334
			link_cntl2 = RREG32(0x4088);
-
 
2335
			WREG32(MM_CFGREGS_CNTL, 0);
-
 
2336
			/* not supported yet */
-
 
2337
			if (link_cntl2 & SELECTABLE_DEEMPHASIS)
-
 
2338
				return;
-
 
2339
		}
-
 
2340
 
-
 
2341
		speed_cntl &= ~LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK;
-
 
2342
		speed_cntl |= (0x3 << LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT);
-
 
2343
		speed_cntl &= ~LC_VOLTAGE_TIMER_SEL_MASK;
-
 
2344
		speed_cntl &= ~LC_FORCE_DIS_HW_SPEED_CHANGE;
-
 
2345
		speed_cntl |= LC_FORCE_EN_HW_SPEED_CHANGE;
-
 
2346
		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
-
 
2347
 
-
 
2348
		tmp = RREG32(0x541c);
-
 
2349
		WREG32(0x541c, tmp | 0x8);
-
 
2350
		WREG32(MM_CFGREGS_CNTL, MM_WR_TO_CFG_EN);
-
 
2351
		link_cntl2 = RREG16(0x4088);
-
 
2352
		link_cntl2 &= ~TARGET_LINK_SPEED_MASK;
-
 
2353
		link_cntl2 |= 0x2;
-
 
2354
		WREG16(0x4088, link_cntl2);
-
 
2355
		WREG32(MM_CFGREGS_CNTL, 0);
-
 
2356
 
-
 
2357
		if ((rdev->family == CHIP_RV670) ||
-
 
2358
		    (rdev->family == CHIP_RV620) ||
-
 
2359
		    (rdev->family == CHIP_RV635)) {
-
 
2360
			training_cntl = RREG32_PCIE_P(PCIE_LC_TRAINING_CNTL);
-
 
2361
			training_cntl &= ~LC_POINT_7_PLUS_EN;
-
 
2362
			WREG32_PCIE_P(PCIE_LC_TRAINING_CNTL, training_cntl);
-
 
2363
		} else {
-
 
2364
			speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
-
 
2365
			speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
-
 
2366
			WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
-
 
2367
		}
-
 
2368
 
-
 
2369
		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
-
 
2370
		speed_cntl |= LC_GEN2_EN_STRAP;
-
 
2371
		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
-
 
2372
 
-
 
2373
	} else {
-
 
2374
		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
-
 
2375
		/* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
-
 
2376
		if (1)
-
 
2377
			link_width_cntl |= LC_UPCONFIGURE_DIS;
-
 
2378
		else
-
 
2379
			link_width_cntl &= ~LC_UPCONFIGURE_DIS;
-
 
2380
		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
-
 
2381
	}
-
 
2382
}