Subversion Repositories Kolibri OS


Rev 2997 → Rev 3192
Line 810... Line 810...
 }
 /* We don't check that the GPU really needs a reset; we simply do the
  * reset, it's up to the caller to determine if the GPU needs one. We
  * might add a helper function to check that.
  */
-static int r600_gpu_soft_reset(struct radeon_device *rdev)
+static void r600_gpu_soft_reset_gfx(struct radeon_device *rdev)
 {
-	struct rv515_mc_save save;
 	u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) |
 				S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) |
 				S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) |
 				S_008010_SH_BUSY(1) | S_008010_SPI03_BUSY(1) |
 				S_008010_SMX_BUSY(1) | S_008010_SC_BUSY(1) |
Line 832... Line 831...
 			S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) |
 			S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
 	u32 tmp;
Line 835... Line 834...
 
 	if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
Line 837... Line -...
-		return 0;
+		return;
 
-	dev_info(rdev->dev, "GPU softreset \n");
-	dev_info(rdev->dev, "  R_008010_GRBM_STATUS=0x%08X\n",
+	dev_info(rdev->dev, "  R_008010_GRBM_STATUS      = 0x%08X\n",
 		RREG32(R_008010_GRBM_STATUS));
-	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2=0x%08X\n",
+	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2     = 0x%08X\n",
Line 849... Line 847...
 		RREG32(CP_STALLED_STAT2));
 	dev_info(rdev->dev, "  R_00867C_CP_BUSY_STAT     = 0x%08X\n",
 		RREG32(CP_BUSY_STAT));
 	dev_info(rdev->dev, "  R_008680_CP_STAT          = 0x%08X\n",
 		RREG32(CP_STAT));
-	rv515_mc_stop(rdev, &save);
-	if (r600_mc_wait_for_idle(rdev)) {
-		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
-	}
+
 	/* Disable CP parsing/prefetching */
 	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
+
 	/* Check if any of the rendering block is busy and reset it */
 	if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
 	    (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
 		tmp = S_008020_SOFT_RESET_CR(1) |
 			S_008020_SOFT_RESET_DB(1) |
Line 884... Line 880...
 	dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
 	WREG32(R_008020_GRBM_SOFT_RESET, tmp);
 	RREG32(R_008020_GRBM_SOFT_RESET);
 	mdelay(15);
 	WREG32(R_008020_GRBM_SOFT_RESET, 0);
-	/* Wait a little for things to settle down */
-	mdelay(1);
+
-	dev_info(rdev->dev, "  R_008010_GRBM_STATUS=0x%08X\n",
+	dev_info(rdev->dev, "  R_008010_GRBM_STATUS      = 0x%08X\n",
 		RREG32(R_008010_GRBM_STATUS));
-	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2=0x%08X\n",
+	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2     = 0x%08X\n",
 		RREG32(R_008014_GRBM_STATUS2));
-	dev_info(rdev->dev, "  R_000E50_SRBM_STATUS=0x%08X\n",
+	dev_info(rdev->dev, "  R_000E50_SRBM_STATUS      = 0x%08X\n",
Line 900... Line 895...
 		RREG32(CP_STALLED_STAT2));
 	dev_info(rdev->dev, "  R_00867C_CP_BUSY_STAT     = 0x%08X\n",
 		RREG32(CP_BUSY_STAT));
 	dev_info(rdev->dev, "  R_008680_CP_STAT          = 0x%08X\n",
 		RREG32(CP_STAT));
+
+}
+
+static void r600_gpu_soft_reset_dma(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
+		return;
+
+	dev_info(rdev->dev, "  R_00D034_DMA_STATUS_REG   = 0x%08X\n",
+		RREG32(DMA_STATUS_REG));
+
+	/* Disable DMA */
+	tmp = RREG32(DMA_RB_CNTL);
+	tmp &= ~DMA_RB_ENABLE;
+	WREG32(DMA_RB_CNTL, tmp);
+
+	/* Reset dma */
+	if (rdev->family >= CHIP_RV770)
+		WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA);
+	else
+		WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
+	RREG32(SRBM_SOFT_RESET);
+	udelay(50);
+	WREG32(SRBM_SOFT_RESET, 0);
+
+	dev_info(rdev->dev, "  R_00D034_DMA_STATUS_REG   = 0x%08X\n",
+		RREG32(DMA_STATUS_REG));
+}
+
+static int r600_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
+{
+	struct rv515_mc_save save;
+
+	if (reset_mask == 0)
+		return 0;
+
+	dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
+
+	rv515_mc_stop(rdev, &save);
+	if (r600_mc_wait_for_idle(rdev)) {
+		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
+	}
+
+	if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE))
+		r600_gpu_soft_reset_gfx(rdev);
+
+	if (reset_mask & RADEON_RESET_DMA)
+		r600_gpu_soft_reset_dma(rdev);
+
+	/* Wait a little for things to settle down */
+	mdelay(1);
+
 	rv515_mc_resume(rdev, &save);
 	return 0;
 }
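The rework above turns the monolithic reset into a mask-driven dispatcher: the MC stop/resume bracket now lives once in r600_gpu_soft_reset(), while the GFX and DMA blocks are reset independently. A minimal sketch of what the split enables (the wrapper below is hypothetical, not part of this changeset; RADEON_RESET_* are the flags the new code tests):

	/* Hypothetical helper: reset only the async DMA engine and leave
	 * the GFX/compute blocks alone. */
	static int r600_dma_only_reset(struct radeon_device *rdev)
	{
		return r600_gpu_soft_reset(rdev, RADEON_RESET_DMA);
	}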
Line 908... Line 957...
 
Line 922... Line 971...
 	/* force CP activities */
 	radeon_ring_force_activity(rdev, ring);
 	return radeon_ring_test_lockup(rdev, ring);
 }
Line -... Line 975...
+
+/**
+ * r600_dma_is_lockup - Check if the DMA engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the async DMA engine is locked up (r6xx-evergreen).
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	u32 dma_status_reg;
+
+	dma_status_reg = RREG32(DMA_STATUS_REG);
+	if (dma_status_reg & DMA_IDLE) {
+		radeon_ring_lockup_update(ring);
+		return false;
+	}
+	/* force ring activities */
+	radeon_ring_force_activity(rdev, ring);
+	return radeon_ring_test_lockup(rdev, ring);
+}
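The new lockup check is a heuristic rather than a definitive test. In outline (a reading of the helpers it calls, not code from this diff):

	/* DMA_IDLE set   -> radeon_ring_lockup_update(): record "alive now",
	 *                   report not locked up.
	 * DMA_IDLE clear -> radeon_ring_force_activity() nudges the ring,
	 *                   then radeon_ring_test_lockup() reports a lockup
	 *                   only if the read pointer has not advanced within
	 *                   the lockup timeout. */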
 
 int r600_asic_reset(struct radeon_device *rdev)
 {
-	return r600_gpu_soft_reset(rdev);
+	return r600_gpu_soft_reset(rdev, (RADEON_RESET_GFX |
+					  RADEON_RESET_COMPUTE |
+					  RADEON_RESET_DMA));
Line 930... Line 1004...
 }
 
 u32 r6xx_remap_render_backend(struct radeon_device *rdev,
Line 976... Line 1050...
 	return data;
 }
Line 978... Line 1052...
 
 int r600_count_pipe_bits(uint32_t val)
 {
-	int i, ret = 0;
-
-	for (i = 0; i < 32; i++) {
-		ret += val & 1;
-		val >>= 1;
-	}
-	return ret;
+	return hweight32(val);
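hweight32() is the kernel's 32-bit population-count helper from <linux/bitops.h>, which is exactly what the removed loop computed one bit at a time:

	/* Illustration: hweight32() returns the Hamming weight, e.g.
	 *   hweight32(0x0000000F) == 4   (four low bits set)
	 *   hweight32(0x80000001) == 2   (MSB + LSB)
	 * so R6XX_MAX_PIPES - hweight32(inactive_pipe_bits) in the caller
	 * below yields the number of active pipes. */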
Line 988... Line 1056...
 }
 
 static void r600_gpu_init(struct radeon_device *rdev)
Line 1146... Line 1214...
 
 	rdev->config.r600.tile_config = tiling_config;
 	WREG32(GB_TILING_CONFIG, tiling_config);
 	WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
 	WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);
Line 1150... Line 1219...
+	WREG32(DMA_TILING_CONFIG, tiling_config & 0xffff);
 
 	tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
Line 1420... Line 1489...
 /*
  * CP & Ring
  */
 void r600_cp_stop(struct radeon_device *rdev)
 {
-//   radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
 	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
 	WREG32(SCRATCH_UMSK, 0);
+	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
 }
Line 1429... Line 1499...
 
 int r600_init_microcode(struct radeon_device *rdev)
 {
Line 1748... Line 1818...
 	r600_cp_stop(rdev);
 	radeon_ring_fini(rdev, ring);
 	radeon_scratch_free(rdev, ring->rptr_save_reg);
 }
Line -... Line 1822...
+
+/*
+ * DMA
+ * Starting with R600, the GPU has an asynchronous
+ * DMA engine.  The programming model is very similar
+ * to the 3D engine (ring buffer, IBs, etc.), but the
+ * DMA controller has its own packet format that is
+ * different from the PM4 format used by the 3D engine.
+ * It supports copying data, writing embedded data,
+ * solid fills, and a number of other things.  It also
+ * has support for tiling/detiling of buffers.
+ */
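For reference, the DMA packet header is a single dword; the encoding the new code relies on is the DMA_PACKET() macro from r600d.h (reproduced here from that header as an assumption for context, it is not part of this diff):

	/* bits 31:28 opcode (write, copy, indirect buffer, fence, trap, ...)
	 * bit  23    t      (tiling select, used by copies)
	 * bit  22    s      (semaphore signal/wait select)
	 * bits 15:0  n      (count; meaning depends on the opcode) */
	#define DMA_PACKET(cmd, t, s, n)	((((cmd) & 0xF) << 28) |	\
						 (((t) & 0x1) << 23) |		\
						 (((s) & 0x1) << 22) |		\
						 ((n) & 0xFFFF))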
+/**
+ * r600_dma_stop - stop the async dma engine
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the async dma engine (r6xx-evergreen).
+ */
+void r600_dma_stop(struct radeon_device *rdev)
+{
+	u32 rb_cntl = RREG32(DMA_RB_CNTL);
+
+	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+
+	rb_cntl &= ~DMA_RB_ENABLE;
+	WREG32(DMA_RB_CNTL, rb_cntl);
+
+	rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
+}
+
+/**
+ * r600_dma_resume - setup and start the async dma engine
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Set up the DMA ring buffer and enable it. (r6xx-evergreen).
+ * Returns 0 for success, error for failure.
+ */
+int r600_dma_resume(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+	u32 rb_cntl, dma_cntl;
+	u32 rb_bufsz;
+	int r;
+
+	/* Reset dma */
+	if (rdev->family >= CHIP_RV770)
+		WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA);
+	else
+		WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
+	RREG32(SRBM_SOFT_RESET);
+	udelay(50);
+	WREG32(SRBM_SOFT_RESET, 0);
+
+	WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL, 0);
+	WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);
+
+	/* Set ring buffer size in dwords */
+	rb_bufsz = drm_order(ring->ring_size / 4);
+	rb_cntl = rb_bufsz << 1;
+#ifdef __BIG_ENDIAN
+	rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
+#endif
+	WREG32(DMA_RB_CNTL, rb_cntl);
+
+	/* Initialize the ring buffer's read and write pointers */
+	WREG32(DMA_RB_RPTR, 0);
+	WREG32(DMA_RB_WPTR, 0);
+
+	/* set the wb address whether it's enabled or not */
+	WREG32(DMA_RB_RPTR_ADDR_HI,
+	       upper_32_bits(rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFF);
+	WREG32(DMA_RB_RPTR_ADDR_LO,
+	       ((rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFFFFFFFC));
+
+	if (rdev->wb.enabled)
+		rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
+
+	WREG32(DMA_RB_BASE, ring->gpu_addr >> 8);
+
+	/* enable DMA IBs */
+	WREG32(DMA_IB_CNTL, DMA_IB_ENABLE);
+
+	dma_cntl = RREG32(DMA_CNTL);
+	dma_cntl &= ~CTXEMPTY_INT_ENABLE;
+	WREG32(DMA_CNTL, dma_cntl);
+
+	if (rdev->family >= CHIP_RV770)
+		WREG32(DMA_MODE, 1);
+
+	ring->wptr = 0;
+	WREG32(DMA_RB_WPTR, ring->wptr << 2);
+
+	ring->rptr = RREG32(DMA_RB_RPTR) >> 2;
+
+	WREG32(DMA_RB_CNTL, rb_cntl | DMA_RB_ENABLE);
+
+	ring->ready = true;
+
+	r = radeon_ring_test(rdev, R600_RING_TYPE_DMA_INDEX, ring);
+	if (r) {
+		ring->ready = false;
+		return r;
+	}
+
+	radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+
+	return 0;
+}
+
+/**
+ * r600_dma_fini - tear down the async dma engine
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the async dma engine and free the ring (r6xx-evergreen).
+ */
+void r600_dma_fini(struct radeon_device *rdev)
+{
+	r600_dma_stop(rdev);
+	radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
Line 1752... Line 1944...
+}
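One detail of r600_dma_resume() worth spelling out: DMA_RB_CNTL takes the ring size as a log2 dword count stored from bit 1 upward, which is why the code shifts the drm_order() result left by one. A worked example for the 64 KiB DMA ring this changeset allocates in r600_init():

	/* ring_size = 64 KiB = 65536 bytes = 16384 dwords = 2^14 dwords,
	 * so rb_bufsz = drm_order(65536 / 4) = 14 and
	 * rb_cntl = 14 << 1 before the swap/writeback/enable bits are
	 * OR'ed in. */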
 
 /*
  * GPU scratch registers helpers function.
Line 1804... Line 1996...
 	}
 	radeon_scratch_free(rdev, scratch);
 	return r;
 }
Line -... Line 2000...
+
+/**
+ * r600_dma_ring_test - simple async dma engine test
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Test the DMA engine by using it to write a value to memory (r6xx-SI).
+ * Returns 0 for success, error for failure.
+ */
+int r600_dma_ring_test(struct radeon_device *rdev,
+		       struct radeon_ring *ring)
+{
+	unsigned i;
+	int r;
+	void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
+	u32 tmp;
+
+	if (!ptr) {
+		DRM_ERROR("invalid vram scratch pointer\n");
+		return -EINVAL;
+	}
+
+	tmp = 0xCAFEDEAD;
+	writel(tmp, ptr);
+
+	r = radeon_ring_lock(rdev, ring, 4);
+	if (r) {
+		DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
+		return r;
+	}
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
+	radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
+	radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff);
+	radeon_ring_write(ring, 0xDEADBEEF);
+	radeon_ring_unlock_commit(rdev, ring);
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		tmp = readl(ptr);
+		if (tmp == 0xDEADBEEF)
+			break;
+		DRM_UDELAY(1);
+	}
+
+	if (i < rdev->usec_timeout) {
+		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
+	} else {
+		DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
+			  ring->idx, tmp);
+		r = -EINVAL;
+	}
+	return r;
+}
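The four dwords queued by the ring test form one complete DMA write packet; the CPU then polls the scratch page until the value lands or rdev->usec_timeout microseconds elapse. Packet shape, as implied by the code above:

	/* dw0: DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1)  - opcode, ndw = 1
	 * dw1: destination address bits 31:2 (dword aligned)
	 * dw2: destination address bits 39:32
	 * dw3: the payload dword (0xDEADBEEF) */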
+
+/*
+ * CP fences/semaphores
+ */
 
 void r600_fence_ring_emit(struct radeon_device *rdev,
 			  struct radeon_fence *fence)
 {
Line 1867... Line 2117...
 	radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
 	radeon_ring_write(ring, addr & 0xffffffff);
 	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
 }
Line -... Line 2121...
+
+/*
+ * DMA fences/semaphores
+ */
+
+/**
+ * r600_dma_fence_ring_emit - emit a fence on the DMA ring
+ *
+ * @rdev: radeon_device pointer
+ * @fence: radeon fence object
+ *
+ * Add a DMA fence packet to the ring to write
+ * the fence seq number and DMA trap packet to generate
+ * an interrupt if needed (r6xx-r7xx).
+ */
+void r600_dma_fence_ring_emit(struct radeon_device *rdev,
+			      struct radeon_fence *fence)
+{
+	struct radeon_ring *ring = &rdev->ring[fence->ring];
+	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
+
+	/* write the fence */
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0));
+	radeon_ring_write(ring, addr & 0xfffffffc);
+	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
+	radeon_ring_write(ring, lower_32_bits(fence->seq));
+	/* generate an interrupt */
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0));
+}
+
+/**
+ * r600_dma_semaphore_ring_emit - emit a semaphore on the dma ring
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ * @semaphore: radeon semaphore object
+ * @emit_wait: wait or signal semaphore
+ *
+ * Add a DMA semaphore packet to the ring to wait on or signal
+ * other rings (r6xx-SI).
+ */
+void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
+				  struct radeon_ring *ring,
+				  struct radeon_semaphore *semaphore,
+				  bool emit_wait)
+{
+	u64 addr = semaphore->gpu_addr;
+	u32 s = emit_wait ? 0 : 1;
+
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SEMAPHORE, 0, s, 0));
+	radeon_ring_write(ring, addr & 0xfffffffc);
+	radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
+}
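Note that one packet type serves both directions: the s header field selects the semaphore operation, with emit_wait mapping to s = 0 and signal to s = 1 in the code above. A typical cross-ring handoff pairs the DMA emitter with its CP counterpart from earlier in this file (a sketch under that assumption):

	/* GFX signals, DMA waits: the DMA work starts only after the GFX
	 * work has passed the semaphore. */
	r600_semaphore_ring_emit(rdev, gfx_ring, sem, false);     /* signal */
	r600_dma_semaphore_ring_emit(rdev, dma_ring, sem, true);  /* wait   */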
 
 int r600_copy_blit(struct radeon_device *rdev,
 		   uint64_t src_offset,
 		   uint64_t dst_offset,
 		   unsigned num_gpu_pages,
Line 1886... Line 2189...
 	r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages, vb);
 	r600_blit_done_copy(rdev, fence, vb, sem);
 	return 0;
 }
Line -... Line 2193...
+
+/**
+ * r600_copy_dma - copy pages using the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @fence: radeon fence object
+ *
+ * Copy GPU paging using the DMA engine (r6xx).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+int r600_copy_dma(struct radeon_device *rdev,
+		  uint64_t src_offset, uint64_t dst_offset,
+		  unsigned num_gpu_pages,
+		  struct radeon_fence **fence)
+{
+	struct radeon_semaphore *sem = NULL;
+	int ring_index = rdev->asic->copy.dma_ring_index;
+	struct radeon_ring *ring = &rdev->ring[ring_index];
+	u32 size_in_dw, cur_size_in_dw;
+	int i, num_loops;
+	int r = 0;
+
+	r = radeon_semaphore_create(rdev, &sem);
+	if (r) {
+		DRM_ERROR("radeon: moving bo (%d).\n", r);
+		return r;
+	}
+
+	size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
+	num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFE);
+	r = radeon_ring_lock(rdev, ring, num_loops * 4 + 8);
+	if (r) {
+		DRM_ERROR("radeon: moving bo (%d).\n", r);
+		radeon_semaphore_free(rdev, &sem, NULL);
+		return r;
+	}
+
+	if (radeon_fence_need_sync(*fence, ring->idx)) {
+		radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
+					    ring->idx);
+		radeon_fence_note_sync(*fence, ring->idx);
+	} else {
+		radeon_semaphore_free(rdev, &sem, NULL);
+	}
+
+	for (i = 0; i < num_loops; i++) {
+		cur_size_in_dw = size_in_dw;
+		if (cur_size_in_dw > 0xFFFE)
+			cur_size_in_dw = 0xFFFE;
+		size_in_dw -= cur_size_in_dw;
+		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
+		radeon_ring_write(ring, dst_offset & 0xfffffffc);
+		radeon_ring_write(ring, src_offset & 0xfffffffc);
+		radeon_ring_write(ring, (((upper_32_bits(dst_offset) & 0xff) << 16) |
+					 (upper_32_bits(src_offset) & 0xff)));
+		src_offset += cur_size_in_dw * 4;
+		dst_offset += cur_size_in_dw * 4;
+	}
+
+	r = radeon_fence_emit(rdev, fence, ring->idx);
+	if (r) {
+		radeon_ring_unlock_undo(rdev, ring);
+		return r;
+	}
+
+	radeon_ring_unlock_commit(rdev, ring);
+	radeon_semaphore_free(rdev, &sem, *fence);
+
+	return r;
+}
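The copy loop bounds each DMA_PACKET_COPY at 0xFFFE dwords, which fixes the ring-space reservation above it. A worked example with illustrative numbers:

	/* num_gpu_pages = 256, GPU page = 4 KiB:
	 *   size_in_dw = (256 << RADEON_GPU_PAGE_SHIFT) / 4 = 262144 dwords
	 *   num_loops  = DIV_ROUND_UP(262144, 0xFFFE)       = 5 copy packets
	 *   reserved   = num_loops * 4 + 8 = 28 dwords
	 * (4 dwords per copy packet, plus 8 for the fence and optional
	 * semaphore emitted around the copy). */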
 
 int r600_set_surface_reg(struct radeon_device *rdev, int reg,
 			 uint32_t tiling_flags, uint32_t pitch,
 			 uint32_t offset, uint32_t obj_size)
 {
Line 1901... Line 2278...
 	/* FIXME: implement */
 }
Line 1903... Line 2280...
 
 static int r600_startup(struct radeon_device *rdev)
 {
-	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	struct radeon_ring *ring;
Line 1907... Line 2284...
 	int r;
 
Line 1936... Line 2313...
 	/* allocate wb buffer */
 	r = radeon_wb_init(rdev);
 	if (r)
 		return r;
Line -... Line 2317...
+
+	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+		return r;
+	}
 
 	/* Enable IRQ */
 	r = r600_irq_init(rdev);
 	if (r) {
 		DRM_ERROR("radeon: IH init failed (%d).\n", r);
 //		radeon_irq_kms_fini(rdev);
 		return r;
 	}
Line -... Line 2337...
 	r600_irq_set(rdev);
 
+	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
 			     R600_CP_RB_RPTR, R600_CP_RB_WPTR,
 			     0, 0xfffff, RADEON_CP_PACKET2);
Line -... Line 2343...
 	if (r)
 		return r;
 
+	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
+			     DMA_RB_RPTR, DMA_RB_WPTR,
+			     2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+	if (r)
+		return r;
 
 	r = r600_cp_load_microcode(rdev);
 	if (r)
 		return r;
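The trailing radeon_ring_init() arguments differ between the two rings because the DMA read/write pointer registers hold byte offsets rather than dword indices. A reading of the two calls above (the argument meanings are inferred from their use here, so treat this as an assumption):

	/* CP:  shift 0, mask 0xfffff, nop filler = RADEON_CP_PACKET2
	 * DMA: shift 2, mask 0x3fffc, nop filler = DMA_PACKET(DMA_PACKET_NOP, ...)
	 * i.e. for the DMA ring, ring->ptr (in dwords) << 2 == register value
	 * (in bytes), masked to keep it dword aligned inside the ring. */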
Line -... Line 2356...
 	r = r600_cp_resume(rdev);
 	if (r)
 		return r;
+
+	r = r600_dma_resume(rdev);
+	if (r)
+		return r;
+
+	r = radeon_ib_pool_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
Line 1961... Line 2367...
 		return r;
+	}
Line 2044... Line 2450...
 		return r;
Line 2045... Line 2451...
 
 	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
Line -... Line 2453...
 	r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
+
+	rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
+	r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
 
Line 2049... Line 2458...
 	rdev->ih.ring_obj = NULL;
 	r600_ih_ring_init(rdev, 64 * 1024);
Line 2148... Line 2557...
 free_scratch:
 	radeon_scratch_free(rdev, scratch);
 	return r;
 }
Line -... Line 2561...
+
+/**
+ * r600_dma_ib_test - test an IB on the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Test a simple IB in the DMA ring (r6xx-SI).
+ * Returns 0 on success, error on failure.
+ */
+int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	struct radeon_ib ib;
+	unsigned i;
+	int r;
+	void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
+	u32 tmp = 0;
+
+    ENTER();
+
+	if (!ptr) {
+		DRM_ERROR("invalid vram scratch pointer\n");
+		return -EINVAL;
+	}
+
+	tmp = 0xCAFEDEAD;
+	writel(tmp, ptr);
+
+	r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
+	if (r) {
+		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
+		return r;
+	}
+
+	ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1);
+	ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
+	ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff;
+	ib.ptr[3] = 0xDEADBEEF;
+	ib.length_dw = 4;
+
+	r = radeon_ib_schedule(rdev, &ib, NULL);
+	if (r) {
+		radeon_ib_free(rdev, &ib);
+		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
+		return r;
+	}
+	r = radeon_fence_wait(ib.fence, false);
+	if (r) {
+		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
+		return r;
+	}
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		tmp = readl(ptr);
+		if (tmp == 0xDEADBEEF)
+			break;
+		DRM_UDELAY(1);
+	}
+	if (i < rdev->usec_timeout) {
+		DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
+	} else {
+		DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp);
+		r = -EINVAL;
+	}
+	radeon_ib_free(rdev, &ib);
+
+    LEAVE();
+
+	return r;
+}
+
+/**
+ * r600_dma_ring_ib_execute - Schedule an IB on the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to schedule
+ *
+ * Schedule an IB in the DMA ring (r6xx-r7xx).
+ */
+void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+	struct radeon_ring *ring = &rdev->ring[ib->ring];
+
+	if (rdev->wb.enabled) {
+		u32 next_rptr = ring->wptr + 4;
+		while ((next_rptr & 7) != 5)
+			next_rptr++;
+		next_rptr += 3;
+		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
+		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+		radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
+		radeon_ring_write(ring, next_rptr);
+	}
+
+	/* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
+	 * Pad as necessary with NOPs.
+	 */
+	while ((ring->wptr & 7) != 5)
+		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0));
+	radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
+	radeon_ring_write(ring, (ib->length_dw << 16) | (upper_32_bits(ib->gpu_addr) & 0xFF));
+
+}
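The NOP padding in r600_dma_ring_ib_execute() enforces the rule stated in its comment: the 3-dword indirect-buffer packet must end on an 8-dword boundary, so it must start at wptr % 8 == 5. A worked example:

	/* wptr == 18: 18 % 8 == 2, so three NOPs advance it to 21
	 * (21 % 8 == 5); the IB packet then fills dwords 21..23 and ends
	 * exactly at the boundary (24). The next_rptr computation follows
	 * the same rule: round up to (x % 8 == 5), then add 3 to point
	 * just past the IB packet. */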
 
 /*
  * Interrupts
  *
  * Interrupts use a ring buffer on r6xx/r7xx hardware.  It works pretty
Line 2339... Line 2851...
 static void r600_disable_interrupt_state(struct radeon_device *rdev)
 {
 	u32 tmp;
Line 2342... Line 2854...
 
 	WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+	tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
+	WREG32(DMA_CNTL, tmp);
 	WREG32(GRBM_INT_CNTL, 0);
 	WREG32(DxMODE_INT_MASK, 0);
 	WREG32(D1GRPH_INTERRUPT_CONTROL, 0);
 	WREG32(D2GRPH_INTERRUPT_CONTROL, 0);
Line 2467... Line 2981...
 	u32 mode_int = 0;
 	u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
 	u32 grbm_int_cntl = 0;
 	u32 hdmi0, hdmi1;
 	u32 d1grph = 0, d2grph = 0;
+	u32 dma_cntl;
Line 2472... Line 2987...
 
 	if (!rdev->irq.installed) {
 		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
 		return -EINVAL;
Line 2501... Line 3016...
 		hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
 		hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
 		hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
 		hdmi1 = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
 	}
+	dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
Line 2506... Line 3022...
 
 	if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
 		DRM_DEBUG("r600_irq_set: sw int\n");
 		cp_int_cntl |= RB_INT_ENABLE;
 		cp_int_cntl |= TIME_STAMP_INT_ENABLE;
 	}
+
+	if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
+		DRM_DEBUG("r600_irq_set: sw int dma\n");
+		dma_cntl |= TRAP_ENABLE;
+	}
 
 	if (rdev->irq.crtc_vblank_int[0] ||
 	    atomic_read(&rdev->irq.pflip[0])) {
 		DRM_DEBUG("r600_irq_set: vblank 0\n");
 		mode_int |= D1MODE_VBLANK_INT_MASK;
Line 2551... Line 3073...
 		DRM_DEBUG("r600_irq_set: hdmi 0\n");
 		hdmi1 |= HDMI0_AZ_FORMAT_WTRIG_MASK;
 	}
Line 2554... Line 3076...
 
 	WREG32(CP_INT_CNTL, cp_int_cntl);
+	WREG32(DMA_CNTL, dma_cntl);
 	WREG32(DxMODE_INT_MASK, mode_int);
 	WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph);
 	WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph);
 	WREG32(GRBM_INT_CNTL, grbm_int_cntl);
Line 2696... Line 3219...
 		}
 		}
 	}
 }
Line -... Line 3223...
+
+void r600_irq_disable(struct radeon_device *rdev)
+{
+	r600_disable_interrupts(rdev);
+	/* Wait and acknowledge irq */
+	mdelay(1);
+	r600_irq_ack(rdev);
+	r600_disable_interrupt_state(rdev);
+}
 
 static u32 r600_get_ih_wptr(struct radeon_device *rdev)
 {
Line 2703... Line 3235...
 	u32 wptr, tmp;
Line 2923... Line 3455...
 			break;
 		case 181: /* CP EOP event */
 			DRM_DEBUG("IH: CP EOP\n");
 			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
 			break;
+		case 224: /* DMA trap event */
+			DRM_DEBUG("IH: DMA trap\n");
+			radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
+			break;
 		case 233: /* GUI IDLE */
 			DRM_DEBUG("IH: GUI idle\n");
 			break;
 		default:
 			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
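This new IH case closes the loop opened by the DMA fence code earlier in the diff; in summary:

	/* DMA fence life cycle in this changeset:
	 * 1. r600_dma_fence_ring_emit() writes the fence value, then emits
	 *    DMA_PACKET_TRAP.
	 * 2. The trap raises IH source id 224 ("DMA trap event"), handled here.
	 * 3. radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX) wakes
	 *    waiters such as the radeon_fence_wait() call in r600_dma_ib_test(). */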