812,9 → 812,8 |
* reset, it's up to the caller to determine if the GPU needs one. We
* might add a helper function to check that.
*/ |
static int r600_gpu_soft_reset(struct radeon_device *rdev) |
static void r600_gpu_soft_reset_gfx(struct radeon_device *rdev) |
{ |
struct rv515_mc_save save; |
u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) | |
S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) | |
S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) | |
834,9 → 833,8 |
u32 tmp; |
|
if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE)) |
return 0; |
return; |
|
dev_info(rdev->dev, "GPU softreset \n"); |
dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n", |
RREG32(R_008010_GRBM_STATUS)); |
dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n", |
851,12 → 849,10 |
RREG32(CP_BUSY_STAT)); |
dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n", |
RREG32(CP_STAT)); |
rv515_mc_stop(rdev, &save); |
if (r600_mc_wait_for_idle(rdev)) { |
dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); |
} |
|
/* Disable CP parsing/prefetching */ |
WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1)); |
|
/* Check if any of the rendering blocks are busy and reset them */
if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) || |
(RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) { |
886,8 → 882,7 |
RREG32(R_008020_GRBM_SOFT_RESET); |
mdelay(15); |
WREG32(R_008020_GRBM_SOFT_RESET, 0); |
/* Wait a little for things to settle down */ |
mdelay(1); |
|
dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n", |
RREG32(R_008010_GRBM_STATUS)); |
dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n", |
902,6 → 897,60 |
RREG32(CP_BUSY_STAT)); |
dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n", |
RREG32(CP_STAT));
}
|
static void r600_gpu_soft_reset_dma(struct radeon_device *rdev) |
{ |
u32 tmp; |
|
if (RREG32(DMA_STATUS_REG) & DMA_IDLE) |
return; |
|
dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n", |
RREG32(DMA_STATUS_REG)); |
|
/* Disable DMA */ |
tmp = RREG32(DMA_RB_CNTL); |
tmp &= ~DMA_RB_ENABLE; |
WREG32(DMA_RB_CNTL, tmp); |
|
/* Reset dma */ |
if (rdev->family >= CHIP_RV770) |
WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA); |
else |
WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA); |
RREG32(SRBM_SOFT_RESET); |
udelay(50); |
WREG32(SRBM_SOFT_RESET, 0); |
|
dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n", |
RREG32(DMA_STATUS_REG)); |
} |
|
static int r600_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask) |
{ |
struct rv515_mc_save save; |
|
if (reset_mask == 0) |
return 0; |
|
dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask); |
|
rv515_mc_stop(rdev, &save); |
if (r600_mc_wait_for_idle(rdev)) { |
dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); |
} |
|
if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) |
r600_gpu_soft_reset_gfx(rdev); |
|
if (reset_mask & RADEON_RESET_DMA) |
r600_gpu_soft_reset_dma(rdev); |
|
/* Wait a little for things to settle down */ |
mdelay(1); |
|
rv515_mc_resume(rdev, &save); |
return 0; |
} |
924,9 → 973,34 |
return radeon_ring_test_lockup(rdev, ring); |
} |
|
/** |
* r600_dma_is_lockup - Check if the DMA engine is locked up |
* |
* @rdev: radeon_device pointer |
* @ring: radeon_ring structure holding ring information |
* |
* Check if the async DMA engine is locked up (r6xx-evergreen). |
* Returns true if the engine appears to be locked up, false if not. |
*/ |
bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) |
{ |
u32 dma_status_reg; |
|
dma_status_reg = RREG32(DMA_STATUS_REG); |
if (dma_status_reg & DMA_IDLE) { |
radeon_ring_lockup_update(ring); |
return false; |
} |
/* force ring activities */ |
radeon_ring_force_activity(rdev, ring); |
return radeon_ring_test_lockup(rdev, ring); |
} |
|
int r600_asic_reset(struct radeon_device *rdev) |
{ |
return r600_gpu_soft_reset(rdev); |
return r600_gpu_soft_reset(rdev, (RADEON_RESET_GFX | |
RADEON_RESET_COMPUTE | |
RADEON_RESET_DMA)); |
} |
|
u32 r6xx_remap_render_backend(struct radeon_device *rdev, |
978,14 → 1052,8 |
|
int r600_count_pipe_bits(uint32_t val) |
{ |
int i, ret = 0; |
|
for (i = 0; i < 32; i++) { |
ret += val & 1; |
val >>= 1; |
}
return ret;
return hweight32(val);
} |
|
static void r600_gpu_init(struct radeon_device *rdev) |
{ |
1148,6 → 1216,7 |
WREG32(GB_TILING_CONFIG, tiling_config); |
WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff); |
WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff); |
WREG32(DMA_TILING_CONFIG, tiling_config & 0xffff); |
|
tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8); |
WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK); |
1422,9 → 1491,10 |
*/ |
void r600_cp_stop(struct radeon_device *rdev) |
{ |
// radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); |
radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); |
WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1)); |
WREG32(SCRATCH_UMSK, 0); |
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; |
} |
|
int r600_init_microcode(struct radeon_device *rdev) |
1750,7 → 1820,129 |
radeon_scratch_free(rdev, ring->rptr_save_reg); |
} |
|
/*
* DMA
* Starting with R600, the GPU has an asynchronous
* DMA engine. The programming model is very similar
* to the 3D engine (ring buffer, IBs, etc.), but the
* DMA controller has its own packet format that is
* different from the PM4 format used by the 3D engine.
* It supports copying data, writing embedded data,
* solid fills, and a number of other things. It also
* has support for tiling/detiling of buffers.
*/
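/* For orientation, the DMA_PACKET() helper used throughout this code packs
* the packet header into a single dword. A sketch of the encoding, assuming
* the r600d.h definition that goes with this series (4-bit command, tiling
* and semaphore flags, 16-bit count):
*
*   #define DMA_PACKET(cmd, t, s, n) ((((cmd) & 0xF) << 28) | \
*                                     (((t) & 0x1) << 23) |   \
*                                     (((s) & 0x1) << 22) |   \
*                                     (((n) & 0xFFFF) << 0))
*
* e.g. DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1) requests a write of a single
* data dword.
*/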
/** |
* r600_dma_stop - stop the async dma engine |
* |
* @rdev: radeon_device pointer |
* |
* Stop the async dma engine (r6xx-evergreen). |
*/ |
void r600_dma_stop(struct radeon_device *rdev) |
{ |
u32 rb_cntl = RREG32(DMA_RB_CNTL); |
|
radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); |
|
rb_cntl &= ~DMA_RB_ENABLE; |
WREG32(DMA_RB_CNTL, rb_cntl); |
|
rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false; |
} |
|
/** |
* r600_dma_resume - setup and start the async dma engine |
* |
* @rdev: radeon_device pointer |
* |
* Set up the DMA ring buffer and enable it (r6xx-evergreen).
* Returns 0 for success, error for failure. |
*/ |
int r600_dma_resume(struct radeon_device *rdev) |
{ |
struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; |
u32 rb_cntl, dma_cntl; |
u32 rb_bufsz; |
int r; |
|
/* Reset dma */ |
if (rdev->family >= CHIP_RV770) |
WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA); |
else |
WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA); |
RREG32(SRBM_SOFT_RESET); |
udelay(50); |
WREG32(SRBM_SOFT_RESET, 0); |
|
WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL, 0); |
WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0); |
|
/* Set ring buffer size in dwords */ |
rb_bufsz = drm_order(ring->ring_size / 4); |
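/* e.g. the 64KB ring allocated at init time gives drm_order(65536 / 4) = 14 */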
rb_cntl = rb_bufsz << 1; |
#ifdef __BIG_ENDIAN |
rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE; |
#endif |
WREG32(DMA_RB_CNTL, rb_cntl); |
|
/* Initialize the ring buffer's read and write pointers */ |
WREG32(DMA_RB_RPTR, 0); |
WREG32(DMA_RB_WPTR, 0); |
|
/* set the wb address whether it's enabled or not */ |
WREG32(DMA_RB_RPTR_ADDR_HI, |
upper_32_bits(rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFF); |
WREG32(DMA_RB_RPTR_ADDR_LO, |
((rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFFFFFFFC)); |
|
if (rdev->wb.enabled) |
rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE; |
|
WREG32(DMA_RB_BASE, ring->gpu_addr >> 8); |
|
/* enable DMA IBs */ |
WREG32(DMA_IB_CNTL, DMA_IB_ENABLE); |
|
dma_cntl = RREG32(DMA_CNTL); |
dma_cntl &= ~CTXEMPTY_INT_ENABLE; |
WREG32(DMA_CNTL, dma_cntl); |
|
if (rdev->family >= CHIP_RV770) |
WREG32(DMA_MODE, 1); |
|
ring->wptr = 0; |
WREG32(DMA_RB_WPTR, ring->wptr << 2); |
|
ring->rptr = RREG32(DMA_RB_RPTR) >> 2; |
|
WREG32(DMA_RB_CNTL, rb_cntl | DMA_RB_ENABLE); |
|
ring->ready = true; |
|
r = radeon_ring_test(rdev, R600_RING_TYPE_DMA_INDEX, ring); |
if (r) { |
ring->ready = false; |
return r; |
} |
|
radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); |
|
return 0; |
} |
|
/** |
* r600_dma_fini - tear down the async dma engine |
* |
* @rdev: radeon_device pointer |
* |
* Stop the async dma engine and free the ring (r6xx-evergreen). |
*/ |
void r600_dma_fini(struct radeon_device *rdev) |
{ |
r600_dma_stop(rdev); |
radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]); |
} |
|
/* |
* GPU scratch registers helpers function. |
*/ |
1806,6 → 1998,64 |
return r; |
} |
|
/** |
* r600_dma_ring_test - simple async dma engine test |
* |
* @rdev: radeon_device pointer |
* @ring: radeon_ring structure holding ring information |
* |
* Test the DMA engine by using it to write a value
* to memory (r6xx-SI).
* Returns 0 for success, error for failure. |
*/ |
int r600_dma_ring_test(struct radeon_device *rdev, |
struct radeon_ring *ring) |
{ |
unsigned i; |
int r; |
void __iomem *ptr = (void *)rdev->vram_scratch.ptr; |
u32 tmp; |
|
if (!ptr) { |
DRM_ERROR("invalid vram scratch pointer\n"); |
return -EINVAL; |
} |
|
tmp = 0xCAFEDEAD; |
writel(tmp, ptr); |
|
r = radeon_ring_lock(rdev, ring, 4); |
if (r) { |
DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r); |
return r; |
} |
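/* a minimal 4-dword WRITE packet: header, dst addr lo, dst addr hi,
* and a single data dword */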
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1)); |
radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc); |
radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff); |
radeon_ring_write(ring, 0xDEADBEEF); |
radeon_ring_unlock_commit(rdev, ring); |
|
for (i = 0; i < rdev->usec_timeout; i++) { |
tmp = readl(ptr); |
if (tmp == 0xDEADBEEF) |
break; |
DRM_UDELAY(1); |
} |
|
if (i < rdev->usec_timeout) { |
DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i); |
} else { |
DRM_ERROR("radeon: ring %d test failed (0x%08X)\n", |
ring->idx, tmp); |
r = -EINVAL; |
} |
return r; |
} |
|
/* |
* CP fences/semaphores |
*/ |
|
void r600_fence_ring_emit(struct radeon_device *rdev, |
struct radeon_fence *fence) |
{ |
1869,6 → 2119,59 |
radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel); |
} |
|
/* |
* DMA fences/semaphores |
*/ |
|
/** |
* r600_dma_fence_ring_emit - emit a fence on the DMA ring |
* |
* @rdev: radeon_device pointer |
* @fence: radeon fence object |
* |
* Add a DMA fence packet to the ring to write
* the fence seq number, and a DMA trap packet to generate
* an interrupt if needed (r6xx-r7xx).
*/ |
void r600_dma_fence_ring_emit(struct radeon_device *rdev, |
struct radeon_fence *fence) |
{ |
struct radeon_ring *ring = &rdev->ring[fence->ring]; |
u64 addr = rdev->fence_drv[fence->ring].gpu_addr; |
|
/* write the fence */ |
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0)); |
radeon_ring_write(ring, addr & 0xfffffffc); |
radeon_ring_write(ring, (upper_32_bits(addr) & 0xff)); |
radeon_ring_write(ring, lower_32_bits(fence->seq)); |
/* generate an interrupt */ |
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0)); |
} |
|
/** |
* r600_dma_semaphore_ring_emit - emit a semaphore on the dma ring |
* |
* @rdev: radeon_device pointer |
* @ring: radeon_ring structure holding ring information |
* @semaphore: radeon semaphore object |
* @emit_wait: wait or signal semaphore |
* |
* Add a DMA semaphore packet to the ring to wait on or signal
* other rings (r6xx-SI).
*/ |
void r600_dma_semaphore_ring_emit(struct radeon_device *rdev, |
struct radeon_ring *ring, |
struct radeon_semaphore *semaphore, |
bool emit_wait) |
{ |
u64 addr = semaphore->gpu_addr; |
u32 s = emit_wait ? 0 : 1; |
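/* the s flag in the packet header selects signal (1) vs wait (0) */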
|
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SEMAPHORE, 0, s, 0)); |
radeon_ring_write(ring, addr & 0xfffffffc); |
radeon_ring_write(ring, upper_32_bits(addr) & 0xff); |
} |
|
int r600_copy_blit(struct radeon_device *rdev, |
uint64_t src_offset, |
uint64_t dst_offset, |
1888,6 → 2191,80 |
return 0; |
} |
|
/** |
* r600_copy_dma - copy pages using the DMA engine |
* |
* @rdev: radeon_device pointer |
* @src_offset: src GPU address |
* @dst_offset: dst GPU address |
* @num_gpu_pages: number of GPU pages to xfer |
* @fence: radeon fence object |
* |
* Copy GPU pages using the DMA engine (r6xx).
* Used by the radeon ttm implementation to move pages if |
* registered as the asic copy callback. |
*/ |
int r600_copy_dma(struct radeon_device *rdev, |
uint64_t src_offset, uint64_t dst_offset, |
unsigned num_gpu_pages, |
struct radeon_fence **fence) |
{ |
struct radeon_semaphore *sem = NULL; |
int ring_index = rdev->asic->copy.dma_ring_index; |
struct radeon_ring *ring = &rdev->ring[ring_index]; |
u32 size_in_dw, cur_size_in_dw; |
int i, num_loops; |
int r = 0; |
|
r = radeon_semaphore_create(rdev, &sem); |
if (r) { |
DRM_ERROR("radeon: moving bo (%d).\n", r); |
return r; |
} |
|
size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4; |
num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFE); |
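/* e.g. a 1MB move is 262144 dwords -> 5 COPY packets of at most 0xFFFE
* dwords each; every packet takes 4 dwords of ring space, and the extra
* 8 dwords below leave room for the semaphore sync and fence */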
r = radeon_ring_lock(rdev, ring, num_loops * 4 + 8); |
if (r) { |
DRM_ERROR("radeon: moving bo (%d).\n", r); |
radeon_semaphore_free(rdev, &sem, NULL); |
return r; |
} |
|
if (radeon_fence_need_sync(*fence, ring->idx)) { |
radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring, |
ring->idx); |
radeon_fence_note_sync(*fence, ring->idx); |
} else { |
radeon_semaphore_free(rdev, &sem, NULL); |
} |
|
for (i = 0; i < num_loops; i++) { |
cur_size_in_dw = size_in_dw; |
if (cur_size_in_dw > 0xFFFE) |
cur_size_in_dw = 0xFFFE; |
size_in_dw -= cur_size_in_dw; |
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw)); |
radeon_ring_write(ring, dst_offset & 0xfffffffc); |
radeon_ring_write(ring, src_offset & 0xfffffffc); |
radeon_ring_write(ring, (((upper_32_bits(dst_offset) & 0xff) << 16) | |
(upper_32_bits(src_offset) & 0xff))); |
src_offset += cur_size_in_dw * 4; |
dst_offset += cur_size_in_dw * 4; |
} |
|
r = radeon_fence_emit(rdev, fence, ring->idx); |
if (r) { |
radeon_ring_unlock_undo(rdev, ring); |
return r; |
} |
|
radeon_ring_unlock_commit(rdev, ring); |
radeon_semaphore_free(rdev, &sem, *fence); |
|
return r; |
} |
|
int r600_set_surface_reg(struct radeon_device *rdev, int reg, |
uint32_t tiling_flags, uint32_t pitch, |
uint32_t offset, uint32_t obj_size) |
1903,7 → 2280,7 |
|
static int r600_startup(struct radeon_device *rdev) |
{ |
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; |
struct radeon_ring *ring; |
int r; |
|
/* enable pcie gen2 link */ |
1938,6 → 2315,18 |
if (r) |
return r; |
|
r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX); |
if (r) { |
dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r); |
return r; |
} |
|
r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX); |
if (r) { |
dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r); |
return r; |
} |
|
/* Enable IRQ */ |
r = r600_irq_init(rdev); |
if (r) { |
1947,12 → 2336,20 |
} |
r600_irq_set(rdev); |
|
ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; |
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET, |
R600_CP_RB_RPTR, R600_CP_RB_WPTR, |
0, 0xfffff, RADEON_CP_PACKET2); |
if (r) |
return r; |
|
ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; |
r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET, |
DMA_RB_RPTR, DMA_RB_WPTR, |
2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0)); |
if (r) |
return r; |
|
r = r600_cp_load_microcode(rdev); |
if (r) |
return r; |
1960,6 → 2357,15 |
if (r) |
return r; |
|
r = r600_dma_resume(rdev); |
if (r) |
return r; |
|
r = radeon_ib_pool_init(rdev); |
if (r) { |
dev_err(rdev->dev, "IB initialization failed (%d).\n", r); |
return r; |
} |
return 0; |
} |
|
2046,6 → 2452,9 |
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL; |
r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024); |
|
rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL; |
r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024); |
|
rdev->ih.ring_obj = NULL; |
r600_ih_ring_init(rdev, 64 * 1024); |
|
2150,6 → 2559,109 |
return r; |
} |
|
/** |
* r600_dma_ib_test - test an IB on the DMA engine |
* |
* @rdev: radeon_device pointer |
* @ring: radeon_ring structure holding ring information |
* |
* Test a simple IB in the DMA ring (r6xx-SI). |
* Returns 0 on success, error on failure. |
*/ |
int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) |
{ |
struct radeon_ib ib; |
unsigned i; |
int r; |
void __iomem *ptr = (void *)rdev->vram_scratch.ptr; |
u32 tmp = 0; |
|
if (!ptr) { |
DRM_ERROR("invalid vram scratch pointer\n"); |
return -EINVAL; |
} |
|
tmp = 0xCAFEDEAD; |
writel(tmp, ptr); |
|
r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256); |
if (r) { |
DRM_ERROR("radeon: failed to get ib (%d).\n", r); |
return r; |
} |
|
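/* the same 4-dword WRITE packet as the ring test above, placed in an IB */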
ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1); |
ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc; |
ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff; |
ib.ptr[3] = 0xDEADBEEF; |
ib.length_dw = 4; |
|
r = radeon_ib_schedule(rdev, &ib, NULL); |
if (r) { |
radeon_ib_free(rdev, &ib); |
DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); |
return r; |
} |
r = radeon_fence_wait(ib.fence, false); |
if (r) { |
DRM_ERROR("radeon: fence wait failed (%d).\n", r); |
return r; |
} |
for (i = 0; i < rdev->usec_timeout; i++) { |
tmp = readl(ptr); |
if (tmp == 0xDEADBEEF) |
break; |
DRM_UDELAY(1); |
} |
if (i < rdev->usec_timeout) { |
DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i); |
} else { |
DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp); |
r = -EINVAL; |
} |
radeon_ib_free(rdev, &ib); |
|
return r; |
} |
|
/** |
* r600_dma_ring_ib_execute - Schedule an IB on the DMA engine |
* |
* @rdev: radeon_device pointer |
* @ib: IB object to schedule |
* |
* Schedule an IB in the DMA ring (r6xx-r7xx). |
*/ |
void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) |
{ |
struct radeon_ring *ring = &rdev->ring[ib->ring]; |
|
if (rdev->wb.enabled) { |
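/* predict the rptr value once this IB is consumed: skip the 4-dword
* WRITE packet emitted below, pad to the same (wptr & 7) == 5
* alignment used at the end of this function, then add the 3-dword
* INDIRECT_BUFFER packet itself */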
u32 next_rptr = ring->wptr + 4; |
while ((next_rptr & 7) != 5) |
next_rptr++; |
next_rptr += 3; |
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1)); |
radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc); |
radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff); |
radeon_ring_write(ring, next_rptr); |
} |
|
/* The indirect buffer packet must end on an 8 DW boundary in the DMA ring. |
* Pad as necessary with NOPs. |
*/ |
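/* (the INDIRECT_BUFFER packet emitted below is 3 dwords, so starting it
* at wptr % 8 == 5 lands its last dword exactly on an 8 DW boundary) */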
while ((ring->wptr & 7) != 5) |
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0)); |
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0)); |
radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0)); |
radeon_ring_write(ring, (ib->length_dw << 16) | (upper_32_bits(ib->gpu_addr) & 0xFF));
}
|
/* |
* Interrupts |
* |
2341,6 → 2853,8 |
u32 tmp; |
|
WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); |
tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE; |
WREG32(DMA_CNTL, tmp); |
WREG32(GRBM_INT_CNTL, 0); |
WREG32(DxMODE_INT_MASK, 0); |
WREG32(D1GRPH_INTERRUPT_CONTROL, 0); |
2469,6 → 2983,7 |
u32 grbm_int_cntl = 0; |
u32 hdmi0, hdmi1; |
u32 d1grph = 0, d2grph = 0; |
u32 dma_cntl; |
|
if (!rdev->irq.installed) { |
WARN(1, "Can't enable IRQ/MSI because no handler is installed\n"); |
2503,6 → 3018,7 |
hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK; |
hdmi1 = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK; |
} |
dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE; |
|
if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) { |
DRM_DEBUG("r600_irq_set: sw int\n"); |
2509,6 → 3025,12 |
cp_int_cntl |= RB_INT_ENABLE; |
cp_int_cntl |= TIME_STAMP_INT_ENABLE; |
} |
|
if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) { |
DRM_DEBUG("r600_irq_set: sw int dma\n"); |
dma_cntl |= TRAP_ENABLE; |
} |
|
if (rdev->irq.crtc_vblank_int[0] || |
atomic_read(&rdev->irq.pflip[0])) { |
DRM_DEBUG("r600_irq_set: vblank 0\n"); |
2553,6 → 3075,7 |
} |
|
WREG32(CP_INT_CNTL, cp_int_cntl); |
WREG32(DMA_CNTL, dma_cntl); |
WREG32(DxMODE_INT_MASK, mode_int); |
WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph); |
WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph); |
2698,6 → 3221,15 |
} |
} |
|
void r600_irq_disable(struct radeon_device *rdev) |
{ |
r600_disable_interrupts(rdev); |
/* Wait and acknowledge irq */ |
mdelay(1); |
r600_irq_ack(rdev); |
r600_disable_interrupt_state(rdev); |
} |
|
static u32 r600_get_ih_wptr(struct radeon_device *rdev) |
{ |
u32 wptr, tmp; |
2925,6 → 3457,10 |
DRM_DEBUG("IH: CP EOP\n"); |
radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX); |
break; |
case 224: /* DMA trap event */ |
DRM_DEBUG("IH: DMA trap\n"); |
radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX); |
break; |
case 233: /* GUI IDLE */ |
DRM_DEBUG("IH: GUI idle\n"); |
break; |