Subversion Repositories Kolibri OS


--- Rev 5271
+++ Rev 6104
@@ -25,17 +25,43 @@
 #include 
 #include 
 #include 
 #include "radeon.h"
 #include "radeon_asic.h"
+#include "radeon_audio.h"
 #include 
 #include "nid.h"
 #include "atom.h"
 #include "ni_reg.h"
 #include "cayman_blit_shaders.h"
 #include "radeon_ucode.h"
 #include "clearstate_cayman.h"
+
+/*
+ * Indirect registers accessor
+ */
+u32 tn_smc_rreg(struct radeon_device *rdev, u32 reg)
+{
+	unsigned long flags;
+	u32 r;
+
+	spin_lock_irqsave(&rdev->smc_idx_lock, flags);
+	WREG32(TN_SMC_IND_INDEX_0, (reg));
+	r = RREG32(TN_SMC_IND_DATA_0);
+	spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
+	return r;
+}
+
+void tn_smc_wreg(struct radeon_device *rdev, u32 reg, u32 v)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&rdev->smc_idx_lock, flags);
+	WREG32(TN_SMC_IND_INDEX_0, (reg));
+	WREG32(TN_SMC_IND_DATA_0, (v));
+	spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
+}
 
 static const u32 tn_rlc_save_restore_register_list[] =
 {
 	0x98fc,
 	0x98f0,
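Note on the tn_smc_rreg/tn_smc_wreg pair added above: the SMC is reached through an index/data register pair, so every access is a two-step MMIO sequence that must be serialized, which is why both accessors hold rdev->smc_idx_lock across the index write and the data access. A minimal user-space sketch of the same pattern, with a fake register file and a pthread mutex standing in for the spinlock and the WREG32/RREG32 MMIO macros (all names below are illustrative, not part of the driver):

    #include <inttypes.h>
    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Toy index/data pair: writing the index register selects which
     * hidden word the data register will read or write next. */
    static uint32_t smc_regs[256];      /* hidden register file */
    static uint32_t smc_ind_index;      /* plays TN_SMC_IND_INDEX_0 */
    static pthread_mutex_t smc_idx_lock = PTHREAD_MUTEX_INITIALIZER;

    static uint32_t smc_rreg(uint32_t reg)
    {
        uint32_t r;

        /* Lock both steps so a concurrent caller cannot retarget the
         * index register between our index write and data read. */
        pthread_mutex_lock(&smc_idx_lock);
        smc_ind_index = reg;                /* WREG32(index, reg) */
        r = smc_regs[smc_ind_index & 0xff]; /* RREG32(data)       */
        pthread_mutex_unlock(&smc_idx_lock);
        return r;
    }

    static void smc_wreg(uint32_t reg, uint32_t v)
    {
        pthread_mutex_lock(&smc_idx_lock);
        smc_ind_index = reg;                /* WREG32(index, reg) */
        smc_regs[smc_ind_index & 0xff] = v; /* WREG32(data, v)    */
        pthread_mutex_unlock(&smc_idx_lock);
    }

    int main(void)
    {
        smc_wreg(0x42, 0xdeadbeef);
        printf("reg 0x42 = 0x%08" PRIx32 "\n", smc_rreg(0x42));
        return 0;
    }

The kernel version uses spin_lock_irqsave rather than a mutex because the accessors may be reached from contexts that cannot sleep; tn_get_temp further down reads TN_CURRENT_GNB_TEMP through this same indirect path via the RREG32_SMC wrapper.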
@@ -777,18 +803,18 @@
 		err = -EINVAL;
 	}
 
 	/* no MC ucode on TN */
 	if (!(rdev->flags & RADEON_IS_IGP)) {
-	snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+		snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
 		err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
-	if (err)
-		goto out;
-	if (rdev->mc_fw->size != mc_req_size) {
-		printk(KERN_ERR
-		       "ni_mc: Bogus length %zu in firmware \"%s\"\n",
-		       rdev->mc_fw->size, fw_name);
-		err = -EINVAL;
-	}
+		if (err)
+			goto out;
+		if (rdev->mc_fw->size != mc_req_size) {
+			printk(KERN_ERR
+			       "ni_mc: Bogus length %zu in firmware \"%s\"\n",
+			       rdev->mc_fw->size, fw_name);
+			err = -EINVAL;
+		}
 	}
 
 	if ((rdev->family >= CHIP_BARTS) && (rdev->family <= CHIP_CAYMAN)) {
@@ -825,8 +851,37 @@
 		rdev->mc_fw = NULL;
 	}
 	return err;
 }
+
+/**
+ * cayman_get_allowed_info_register - fetch the register for the info ioctl
+ *
+ * @rdev: radeon_device pointer
+ * @reg: register offset in bytes
+ * @val: register value
+ *
+ * Returns 0 for success or -EINVAL for an invalid register
+ *
+ */
+int cayman_get_allowed_info_register(struct radeon_device *rdev,
+				     u32 reg, u32 *val)
+{
+	switch (reg) {
+	case GRBM_STATUS:
+	case GRBM_STATUS_SE0:
+	case GRBM_STATUS_SE1:
+	case SRBM_STATUS:
+	case SRBM_STATUS2:
+	case (DMA_STATUS_REG + DMA0_REGISTER_OFFSET):
+	case (DMA_STATUS_REG + DMA1_REGISTER_OFFSET):
+	case UVD_STATUS:
+		*val = RREG32(reg);
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
 
 int tn_get_temp(struct radeon_device *rdev)
 {
 	u32 temp = RREG32_SMC(TN_CURRENT_GNB_TEMP) & 0x7ff;
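The cayman_get_allowed_info_register addition above is a whitelist: the info ioctl may hand userspace only a fixed set of read-only status registers, and anything else is rejected with -EINVAL before the hardware is touched. A self-contained sketch of the same shape (the register offsets here are made up; the real values come from nid.h):

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical offsets standing in for GRBM_STATUS and friends. */
    #define DEMO_GRBM_STATUS 0x8010
    #define DEMO_SRBM_STATUS 0x0e50

    static uint32_t mmio_read(uint32_t reg) { return reg ^ 0xa5a5a5a5; } /* stub */

    /* Only known side-effect-free status registers are readable. */
    static int get_allowed_info_register(uint32_t reg, uint32_t *val)
    {
        switch (reg) {
        case DEMO_GRBM_STATUS:
        case DEMO_SRBM_STATUS:
            *val = mmio_read(reg);
            return 0;
        default:
            return -EINVAL;
        }
    }

    int main(void)
    {
        uint32_t v;
        printf("status reg: %d\n", get_allowed_info_register(DEMO_GRBM_STATUS, &v));
        printf("unlisted reg: %d\n", get_allowed_info_register(0x1234, &v));
        return 0;
    }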
@@ -959,7 +1014,9 @@
 		WREG32((0x2c20 + j), 0x00000000);
 		WREG32((0x2c24 + j), 0x00000000);
 	}
 
 	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
+	WREG32(SRBM_INT_CNTL, 0x1);
+	WREG32(SRBM_INT_ACK, 0x1);
 
 	evergreen_fix_pci_max_read_req_size(rdev);
@@ -1083,21 +1140,21 @@
 	WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
 	WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
 
 	if ((rdev->config.cayman.max_backends_per_se == 1) &&
 	    (rdev->flags & RADEON_IS_IGP)) {
-		if ((disabled_rb_mask & 3) == 1) {
-			/* RB0 disabled, RB1 enabled */
-			tmp = 0x11111111;
-		} else {
+		if ((disabled_rb_mask & 3) == 2) {
 			/* RB1 disabled, RB0 enabled */
 			tmp = 0x00000000;
+		} else {
+			/* RB0 disabled, RB1 enabled */
+			tmp = 0x11111111;
 		}
 	} else {
-	tmp = gb_addr_config & NUM_PIPES_MASK;
-	tmp = r6xx_remap_render_backend(rdev, tmp,
-					rdev->config.cayman.max_backends_per_se *
-					rdev->config.cayman.max_shader_engines,
-					CAYMAN_MAX_BACKENDS, disabled_rb_mask);
+		tmp = gb_addr_config & NUM_PIPES_MASK;
+		tmp = r6xx_remap_render_backend(rdev, tmp,
+						rdev->config.cayman.max_backends_per_se *
+						rdev->config.cayman.max_shader_engines,
+						CAYMAN_MAX_BACKENDS, disabled_rb_mask);
 	}
 	WREG32(GB_BACKEND_MAP, tmp);
 
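The render-backend hunk above is a behavioral fix, not a cleanup: disabled_rb_mask & 3 isolates the state of the two RBs, and in Rev 5271 the tested value disagreed with the comments and assignments. Assuming bit 0 flags RB0 as disabled and bit 1 flags RB1 (the interesting values here are 1 and 2; the sketch prints all four), the corrected mapping can be checked in isolation:

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors the Rev 6104 logic: only (mask & 3) == 2 (RB1 disabled,
     * RB0 enabled) routes all pipes to backend 0; otherwise pipes go
     * to backend 1, which covers the RB0-disabled case. */
    static uint32_t backend_map(uint32_t disabled_rb_mask)
    {
        if ((disabled_rb_mask & 3) == 2)
            return 0x00000000; /* RB1 disabled, RB0 enabled */
        return 0x11111111;     /* RB0 disabled, RB1 enabled */
    }

    int main(void)
    {
        for (uint32_t m = 0; m < 4; m++)
            printf("disabled_rb_mask=%u -> GB_BACKEND_MAP=0x%08x\n",
                   (unsigned)m, (unsigned)backend_map(m));
        return 0;
    }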
@@ -1267,10 +1324,11 @@
 	 * the VMs are determined by the application and setup and assigned
 	 * on the fly in the vm part of radeon_gart.c
 	 */
 	for (i = 1; i < 8; i++) {
 		WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (i << 2), 0);
-		WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2), rdev->vm_manager.max_pfn);
+		WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2),
+			rdev->vm_manager.max_pfn - 1);
 		WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
 		       rdev->vm_manager.saved_table_addr[i]);
 	}
 
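The max_pfn change in this hunk reads as an off-by-one fix: VM_CONTEXT0_PAGE_TABLE_END_ADDR evidently holds the last valid page frame number, an inclusive bound, so a manager covering max_pfn pages must program max_pfn - 1. In miniature (the numbers are hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t max_pfn = 1u << 20; /* 1 Mi pages of 4 KiB => 4 GiB */

        /* Valid PFNs run 0 .. max_pfn - 1; writing max_pfn itself would
         * declare one page past the table as mappable. */
        printf("START = %u, END = %u (inclusive)\n", 0u, max_pfn - 1);
        return 0;
    }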
@@ -1326,8 +1384,15 @@
 	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
 	       L2_CACHE_BIGK_FRAGMENT_SIZE(6));
 	radeon_gart_table_vram_unpin(rdev);
 }
+
+static void cayman_pcie_gart_fini(struct radeon_device *rdev)
+{
+	cayman_pcie_gart_disable(rdev);
+	radeon_gart_table_vram_free(rdev);
+	radeon_gart_fini(rdev);
+}
 
 void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
 			      int ring, u32 cp_int_cntl)
 {
@@ -1375,9 +1440,9 @@
 	radeon_ring_write(ring, 1);
 
 	if (ring->rptr_save_reg) {
 		uint32_t next_rptr = ring->wptr + 3 + 4 + 8;
 		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
-		radeon_ring_write(ring, ((ring->rptr_save_reg -
+		radeon_ring_write(ring, ((ring->rptr_save_reg - 
 					  PACKET3_SET_CONFIG_REG_START) >> 2));
 		radeon_ring_write(ring, next_rptr);
 	}
@@ -1403,11 +1468,11 @@
 {
 	if (enable)
 		WREG32(CP_ME_CNTL, 0);
 	else {
 		if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
-		radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+			radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
 		WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
 		WREG32(SCRATCH_UMSK, 0);
 		rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
 	}
 }
@@ -1552,7 +1617,14 @@
 	/* XXX init other rings */
 
 	return 0;
 }
+
+static void cayman_cp_fini(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	cayman_cp_enable(rdev, false);
+	radeon_ring_fini(rdev, ring);
+	radeon_scratch_free(rdev, ring->rptr_save_reg);
+}
-
 
 static int cayman_cp_resume(struct radeon_device *rdev)
@@ -1615,13 +1687,13 @@
 
 	WREG32(CP_DEBUG, (1 << 27));
 
 	/* set the wb address whether it's enabled or not */
 	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
-		WREG32(SCRATCH_UMSK, 0xff);
+	WREG32(SCRATCH_UMSK, 0xff);
 
 	for (i = 0; i < 3; ++i) {
 		uint32_t rb_cntl;
 		uint64_t addr;
 
-	/* Set ring buffer size */
+		/* Set ring buffer size */
 		ring = &rdev->ring[ridx[i]];
@@ -1643,15 +1715,15 @@
 		ring = &rdev->ring[ridx[i]];
 		WREG32(cp_rb_base[i], ring->gpu_addr >> 8);
 	}
 
 	for (i = 0; i < 3; ++i) {
-	/* Initialize the ring buffer's read and write pointers */
+		/* Initialize the ring buffer's read and write pointers */
 		ring = &rdev->ring[ridx[i]];
 		WREG32_P(cp_rb_cntl[i], RB_RPTR_WR_ENA, ~RB_RPTR_WR_ENA);
 
 		ring->wptr = 0;
 		WREG32(cp_rb_rptr[i], 0);
 		WREG32(cp_rb_wptr[i], ring->wptr);
 
-	mdelay(1);
+		mdelay(1);
 		WREG32_P(cp_rb_cntl[i], 0, ~RB_RPTR_WR_ENA);
@@ -1920,10 +1992,10 @@
 bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
 {
 	u32 reset_mask = cayman_gpu_check_soft_reset(rdev);
 
 	if (!(reset_mask & (RADEON_RESET_GFX |
-					    RADEON_RESET_COMPUTE |
+			    RADEON_RESET_COMPUTE |
 			    RADEON_RESET_CP))) {
 		radeon_ring_lockup_update(rdev, ring);
 		return false;
 	}
@@ -1982,17 +2054,36 @@
 	if (r) {
 		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
 		return r;
 	}
 
-//   r = rv770_uvd_resume(rdev);
-//   if (!r) {
-//       r = radeon_fence_driver_start_ring(rdev,
-//                          R600_RING_TYPE_UVD_INDEX);
-//       if (r)
-//           dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
-//   }
-//   if (r)
-//       rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
+	r = uvd_v2_2_resume(rdev);
+	if (!r) {
+		r = radeon_fence_driver_start_ring(rdev,
+						   R600_RING_TYPE_UVD_INDEX);
+		if (r)
+			dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
+	}
+	if (r)
+		rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
+
+	if (rdev->family == CHIP_ARUBA) {
+		r = radeon_vce_resume(rdev);
+		if (!r)
+			r = vce_v1_0_resume(rdev);
+
+		if (!r)
+			r = radeon_fence_driver_start_ring(rdev,
+							   TN_RING_TYPE_VCE1_INDEX);
+		if (!r)
+			r = radeon_fence_driver_start_ring(rdev,
+							   TN_RING_TYPE_VCE2_INDEX);
+
+		if (r) {
+			dev_err(rdev->dev, "VCE init error (%d).\n", r);
+			rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size = 0;
+			rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_size = 0;
+		}
+	}
 
 	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
 	if (r) {
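The uncommented UVD path above (and the new VCE block) uses a single error variable to chain optional bring-up steps: each stage runs only if the previous ones succeeded (if (!r)), and one trailing if (r) degrades the feature by zeroing the ring size instead of failing device startup. A compilable sketch of that control flow, with stubs standing in for uvd_v2_2_resume and radeon_fence_driver_start_ring:

    #include <stdio.h>

    static int stage_resume(void)      { return 0; }  /* stub: succeeds */
    static int stage_start_fence(void) { return -5; } /* stub: fails    */

    int main(void)
    {
        unsigned ring_size = 4096;
        int r;

        r = stage_resume();
        if (!r)                       /* run only if resume worked */
            r = stage_start_fence();
        if (r) {
            /* Degrade gracefully: ring_size = 0 marks the engine
             * unusable, but startup as a whole still succeeds. */
            fprintf(stderr, "stage init error (%d)\n", r);
            ring_size = 0;
        }
        printf("ring_size = %u\n", ring_size);
        return 0;
    }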
@@ -2026,9 +2117,9 @@
 	}
 
 	r = r600_irq_init(rdev);
 	if (r) {
 		DRM_ERROR("radeon: IH init failed (%d).\n", r);
-//		radeon_irq_kms_fini(rdev);
+		radeon_irq_kms_fini(rdev);
 		return r;
 	}
 	evergreen_irq_set(rdev);
@@ -2059,7 +2150,31 @@
 
 	r = cayman_dma_resume(rdev);
 	if (r)
 		return r;
 
+	ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+	if (ring->ring_size) {
+		r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
+				     RADEON_CP_PACKET2);
+		if (!r)
+			r = uvd_v1_0_init(rdev);
+		if (r)
+			DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
+	}
+
+	if (rdev->family == CHIP_ARUBA) {
+		ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
+		if (ring->ring_size)
+			r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0);
+
+		ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
+		if (ring->ring_size)
+			r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0);
+
+		if (!r)
+			r = vce_v1_0_init(rdev);
+		if (r)
+			DRM_ERROR("radeon: failed initializing VCE (%d).\n", r);
+	}
 
 	r = radeon_ib_pool_init(rdev);
@@ -2167,12 +2282,25 @@
 
 	ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
 	ring->ring_obj = NULL;
 	r600_ring_init(rdev, ring, 64 * 1024);
 
-//   r = radeon_uvd_init(rdev);
-//   if (!r) {
-//       ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
-//       ring->ring_obj = NULL;
-//       r600_ring_init(rdev, ring, 4096);
-//   }
+	r = radeon_uvd_init(rdev);
+	if (!r) {
+		ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+		ring->ring_obj = NULL;
+		r600_ring_init(rdev, ring, 4096);
+	}
+
+	if (rdev->family == CHIP_ARUBA) {
+		r = radeon_vce_init(rdev);
+		if (!r) {
+			ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
+			ring->ring_obj = NULL;
+			r600_ring_init(rdev, ring, 4096);
+
+			ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
+			ring->ring_obj = NULL;
+			r600_ring_init(rdev, ring, 4096);
+		}
+	}
 
@@ -2185,8 +2313,18 @@
 
 	rdev->accel_working = true;
 	r = cayman_startup(rdev);
 	if (r) {
 		dev_err(rdev->dev, "disabling GPU acceleration\n");
+		cayman_cp_fini(rdev);
+		cayman_dma_fini(rdev);
+		r600_irq_fini(rdev);
+		if (rdev->flags & RADEON_IS_IGP)
+			sumo_rlc_fini(rdev);
+		radeon_wb_fini(rdev);
+		radeon_ib_pool_fini(rdev);
+		radeon_vm_manager_fini(rdev);
+		radeon_irq_kms_fini(rdev);
+		cayman_pcie_gart_fini(rdev);
 		rdev->accel_working = false;
 	}
 
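The widened error path above makes cayman_init unwind everything cayman_startup may have brought up, in roughly the reverse of initialization order, before clearing accel_working, so a failed start leaves no half-initialized engines behind; cayman_fini in the next hunk runs much the same teardown for the normal case. The idiom in miniature (stubbed stages, illustrative names only):

    #include <stdio.h>

    static int  cp_init(void)  { puts("cp up");   return 0;  }
    static void cp_fini(void)  { puts("cp down");            }
    static int  dma_init(void) { puts("dma up");  return -1; } /* fails */

    static int startup(void)
    {
        int r = cp_init();
        if (r)
            return r;
        r = dma_init();
        if (r) {
            cp_fini(); /* unwind in reverse order of bring-up */
            return r;
        }
        return 0;
    }

    int main(void)
    {
        printf("startup -> %d\n", startup());
        return 0;
    }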
@@ -2203,8 +2341,34 @@
 	}
 
 	return 0;
+}
+
+void cayman_fini(struct radeon_device *rdev)
+{
+	radeon_pm_fini(rdev);
+	cayman_cp_fini(rdev);
+	cayman_dma_fini(rdev);
+	r600_irq_fini(rdev);
+	if (rdev->flags & RADEON_IS_IGP)
+		sumo_rlc_fini(rdev);
+	radeon_wb_fini(rdev);
+	radeon_vm_manager_fini(rdev);
+	radeon_ib_pool_fini(rdev);
+	radeon_irq_kms_fini(rdev);
+	uvd_v1_0_fini(rdev);
+	radeon_uvd_fini(rdev);
+	if (rdev->family == CHIP_ARUBA)
+		radeon_vce_fini(rdev);
+	cayman_pcie_gart_fini(rdev);
+	r600_vram_scratch_fini(rdev);
+	radeon_gem_fini(rdev);
+	radeon_fence_driver_fini(rdev);
+	radeon_bo_fini(rdev);
+	radeon_atombios_fini(rdev);
+	kfree(rdev->bios);
+	rdev->bios = NULL;
 }
 
 /*
  * vm
  */
@@ -2407,7 +2571,47 @@
 
 	/* bits 0-7 are the VM contexts0-7 */
 	radeon_ring_write(ring, PACKET0(VM_INVALIDATE_REQUEST, 0));
 	radeon_ring_write(ring, 1 << vm_id);
 
+	/* wait for the invalidate to complete */
+	radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+	radeon_ring_write(ring, (WAIT_REG_MEM_FUNCTION(0) |  /* always */
+				 WAIT_REG_MEM_ENGINE(0))); /* me */
+	radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0); /* ref */
+	radeon_ring_write(ring, 0); /* mask */
+	radeon_ring_write(ring, 0x20); /* poll interval */
+
 	/* sync PFP to ME, otherwise we might get invalid PFP reads */
 	radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
+	radeon_ring_write(ring, 0x0);
+}
+
+int tn_set_vce_clocks(struct radeon_device *rdev, u32 evclk, u32 ecclk)
+{
+	struct atom_clock_dividers dividers;
+	int r, i;
+
+        r = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+					   ecclk, false, &dividers);
+	if (r)
+		return r;
+
+	for (i = 0; i < 100; i++) {
+		if (RREG32(CG_ECLK_STATUS) & ECLK_STATUS)
+			break;
+		mdelay(10);
+	}
+	if (i == 100)
+		return -ETIMEDOUT;
+
+	WREG32_P(CG_ECLK_CNTL, dividers.post_div, ~(ECLK_DIR_CNTL_EN|ECLK_DIVIDER_MASK));
+
+	for (i = 0; i < 100; i++) {
+		if (RREG32(CG_ECLK_STATUS) & ECLK_STATUS)
+			break;
+		mdelay(10);
+	}
+	if (i == 100)
+		return -ETIMEDOUT;
 
2618