@@ -1734,7 +1734,7 @@
case CHIP_SUMO: |
rdev->config.evergreen.num_ses = 1; |
rdev->config.evergreen.max_pipes = 4; |
-		rdev->config.evergreen.max_tile_pipes = 2;
+		rdev->config.evergreen.max_tile_pipes = 4;
if (rdev->pdev->device == 0x9648) |
rdev->config.evergreen.max_simds = 3; |
else if ((rdev->pdev->device == 0x9647) || |
@@ -1757,7 +1757,7 @@
rdev->config.evergreen.sc_prim_fifo_size = 0x40; |
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; |
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; |
-		gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
+		gb_addr_config = SUMO_GB_ADDR_CONFIG_GOLDEN;
break; |
case CHIP_SUMO2: |
rdev->config.evergreen.num_ses = 1; |
@@ -1779,7 +1779,7 @@
rdev->config.evergreen.sc_prim_fifo_size = 0x40; |
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; |
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; |
-		gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
+		gb_addr_config = SUMO2_GB_ADDR_CONFIG_GOLDEN;
break; |
case CHIP_BARTS: |
rdev->config.evergreen.num_ses = 2; |
@@ -1827,7 +1827,7 @@
break; |
case CHIP_CAICOS: |
rdev->config.evergreen.num_ses = 1; |
-		rdev->config.evergreen.max_pipes = 4;
+		rdev->config.evergreen.max_pipes = 2;
rdev->config.evergreen.max_tile_pipes = 2; |
rdev->config.evergreen.max_simds = 2; |
rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses; |
@@ -1947,6 +1947,7 @@
WREG32(GB_ADDR_CONFIG, gb_addr_config); |
WREG32(DMIF_ADDR_CONFIG, gb_addr_config); |
WREG32(HDP_ADDR_CONFIG, gb_addr_config); |
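	/*
	 * Editorial note: DMA_TILING_CONFIG is the write added by this
	 * patch.  The async DMA engine does its own address decoding, so
	 * it is handed the same gb_addr_config golden value that GB, DMIF
	 * and HDP already receive; otherwise tiled DMA transfers would
	 * disagree with the rest of the chip about the pipe configuration.
	 */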
WREG32(DMA_TILING_CONFIG, gb_addr_config); |
|
tmp = gb_addr_config & NUM_PIPES_MASK; |
tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends, |
@@ -2316,8 +2317,12 @@
CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); |
cayman_cp_int_cntl_setup(rdev, 1, 0); |
cayman_cp_int_cntl_setup(rdev, 2, 0); |
tmp = RREG32(CAYMAN_DMA1_CNTL) & ~TRAP_ENABLE; |
WREG32(CAYMAN_DMA1_CNTL, tmp); |
} else |
WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); |
tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE; |
WREG32(DMA_CNTL, tmp); |
WREG32(GRBM_INT_CNTL, 0); |
WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); |
WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); |
@@ -2370,6 +2375,7 @@
u32 grbm_int_cntl = 0; |
u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0; |
u32 afmt1 = 0, afmt2 = 0, afmt3 = 0, afmt4 = 0, afmt5 = 0, afmt6 = 0; |
u32 dma_cntl, dma_cntl1 = 0; |
|
if (!rdev->irq.installed) { |
WARN(1, "Can't enable IRQ/MSI because no handler is installed\n"); |
@@ -2397,6 +2403,8 @@
afmt5 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK; |
afmt6 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK; |
|
dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE; |
|
if (rdev->family >= CHIP_CAYMAN) { |
/* enable CP interrupts on all rings */ |
if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) { |
@@ -2419,6 +2427,19 @@
} |
} |
|
if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) { |
DRM_DEBUG("r600_irq_set: sw int dma\n"); |
dma_cntl |= TRAP_ENABLE; |
} |
|
if (rdev->family >= CHIP_CAYMAN) { |
dma_cntl1 = RREG32(CAYMAN_DMA1_CNTL) & ~TRAP_ENABLE; |
if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_DMA1_INDEX])) { |
DRM_DEBUG("r600_irq_set: sw int dma1\n"); |
dma_cntl1 |= TRAP_ENABLE; |
} |
} |
|
if (rdev->irq.crtc_vblank_int[0] || |
atomic_read(&rdev->irq.pflip[0])) { |
DRM_DEBUG("evergreen_irq_set: vblank 0\n"); |
@@ -2504,6 +2525,12 @@
cayman_cp_int_cntl_setup(rdev, 2, cp_int_cntl2); |
} else |
WREG32(CP_INT_CNTL, cp_int_cntl); |
|
WREG32(DMA_CNTL, dma_cntl); |
|
if (rdev->family >= CHIP_CAYMAN) |
WREG32(CAYMAN_DMA1_CNTL, dma_cntl1); |
|
WREG32(GRBM_INT_CNTL, grbm_int_cntl); |
|
WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1); |
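	/*
	 * Editorial sketch (hypothetical helper, not part of the patch):
	 * the recurring pattern behind the DMA_CNTL and CAYMAN_DMA1_CNTL
	 * updates above is a read-modify-write that toggles only the
	 * TRAP_ENABLE bit, leaving the engine's other control bits intact:
	 *
	 *	static void dma_trap_enable(struct radeon_device *rdev,
	 *				    u32 reg, bool enable)
	 *	{
	 *		u32 tmp = RREG32(reg) & ~TRAP_ENABLE;
	 *
	 *		if (enable)
	 *			tmp |= TRAP_ENABLE;
	 *		WREG32(reg, tmp);
	 *	}
	 */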
@@ -3006,6 +3033,16 @@
break; |
} |
break; |
case 146: |
case 147: |
dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data); |
dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", |
RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR)); |
dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", |
RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS)); |
/* reset addr and status */ |
WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1); |
break; |
case 176: /* CP_INT in ring buffer */ |
case 177: /* CP_INT in IB1 */ |
case 178: /* CP_INT in IB2 */ |
@@ -3029,9 +3066,19 @@
} else |
radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX); |
break; |
case 224: /* DMA trap event */ |
DRM_DEBUG("IH: DMA trap\n"); |
radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX); |
break; |
case 233: /* GUI IDLE */ |
DRM_DEBUG("IH: GUI idle\n"); |
break; |
	case 244: /* DMA1 trap event */
if (rdev->family >= CHIP_CAYMAN) { |
DRM_DEBUG("IH: DMA1 trap\n"); |
radeon_fence_process(rdev, CAYMAN_RING_TYPE_DMA1_INDEX); |
} |
break; |
default: |
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); |
break; |
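	/*
	 * Editorial note on the IH source IDs added above: 146/147 are VM
	 * protection faults, 224 is the trap raised by DMA_PACKET_TRAP on
	 * the DMA engine, and 244 is the same trap from Cayman's second
	 * engine (DMA1).  A DMA fence therefore completes in two halves:
	 * evergreen_dma_fence_ring_emit() (below) writes fence->seq with a
	 * DMA_PACKET_FENCE and raises the trap, and the handler above calls
	 * radeon_fence_process() to re-read the sequence number and wake
	 * any waiters on that ring.
	 */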
@@ -3053,6 +3100,143 @@
return IRQ_HANDLED; |
} |
|
/** |
* evergreen_dma_fence_ring_emit - emit a fence on the DMA ring |
* |
* @rdev: radeon_device pointer |
* @fence: radeon fence object |
* |
 * Add a fence packet to the DMA ring that writes the fence
 * sequence number, followed by a trap packet to generate an
 * interrupt if needed (evergreen-SI).
*/ |
void evergreen_dma_fence_ring_emit(struct radeon_device *rdev, |
struct radeon_fence *fence) |
{ |
struct radeon_ring *ring = &rdev->ring[fence->ring]; |
u64 addr = rdev->fence_drv[fence->ring].gpu_addr; |
/* write the fence */ |
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0)); |
radeon_ring_write(ring, addr & 0xfffffffc); |
radeon_ring_write(ring, (upper_32_bits(addr) & 0xff)); |
radeon_ring_write(ring, fence->seq); |
/* generate an interrupt */ |
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0)); |
/* flush HDP */ |
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0)); |
radeon_ring_write(ring, (0xf << 16) | HDP_MEM_COHERENCY_FLUSH_CNTL); |
radeon_ring_write(ring, 1); |
} |
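/*
 * For reference, a minimal sketch of the DMA packet encoding these
 * helpers rely on, as defined by the matching evergreend.h changes
 * (reproduced here as an editorial aid; the header file is
 * authoritative): the opcode sits in the top nibble and the count in
 * the low 20 bits, which is why a single packet is limited to 0xFFFFF
 * dwords.
 *
 *	#define DMA_PACKET(cmd, t, s, n)	((((cmd) & 0xF) << 28) |
 *						 (((t) & 0x1) << 23) |
 *						 (((s) & 0x1) << 22) |
 *						 ((n) & 0xFFFFF))
 */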
|
/** |
* evergreen_dma_ring_ib_execute - schedule an IB on the DMA engine |
* |
* @rdev: radeon_device pointer |
* @ib: IB object to schedule |
* |
* Schedule an IB in the DMA ring (evergreen). |
*/ |
void evergreen_dma_ring_ib_execute(struct radeon_device *rdev, |
struct radeon_ib *ib) |
{ |
struct radeon_ring *ring = &rdev->ring[ib->ring]; |
|
if (rdev->wb.enabled) { |
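		/* Predict where wptr will point once this IB is queued: the
		 * 4-dword WRITE packet emitted below, NOP padding up to
		 * (wptr & 7) == 5, then the 3-dword INDIRECT_BUFFER packet. */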
u32 next_rptr = ring->wptr + 4; |
while ((next_rptr & 7) != 5) |
next_rptr++; |
next_rptr += 3; |
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1)); |
radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc); |
radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff); |
radeon_ring_write(ring, next_rptr); |
} |
|
/* The indirect buffer packet must end on an 8 DW boundary in the DMA ring. |
* Pad as necessary with NOPs. |
*/ |
while ((ring->wptr & 7) != 5) |
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0)); |
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0)); |
radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0)); |
	radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
}
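/*
 * A minimal sketch of the alignment rule above (the helper name is
 * hypothetical, not part of the driver): DMA_PACKET_INDIRECT_BUFFER is
 * 3 dwords long and must end on an 8-dword boundary, so it has to start
 * at (wptr & 7) == 5, and everything before it is filled with 1-dword
 * NOP packets.
 */
static unsigned evergreen_dma_ib_pad_count(unsigned wptr)
{
	unsigned pad = 0;

	while (((wptr + pad) & 7) != 5)
		pad++;
	return pad;	/* 0..7 NOP packets */
}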
|
/** |
* evergreen_copy_dma - copy pages using the DMA engine |
* |
* @rdev: radeon_device pointer |
* @src_offset: src GPU address |
* @dst_offset: dst GPU address |
* @num_gpu_pages: number of GPU pages to xfer |
* @fence: radeon fence object |
* |
 * Copy GPU pages using the DMA engine (evergreen-cayman).
* Used by the radeon ttm implementation to move pages if |
* registered as the asic copy callback. |
*/ |
int evergreen_copy_dma(struct radeon_device *rdev, |
uint64_t src_offset, uint64_t dst_offset, |
unsigned num_gpu_pages, |
struct radeon_fence **fence) |
{ |
struct radeon_semaphore *sem = NULL; |
int ring_index = rdev->asic->copy.dma_ring_index; |
struct radeon_ring *ring = &rdev->ring[ring_index]; |
u32 size_in_dw, cur_size_in_dw; |
int i, num_loops; |
int r = 0; |
|
r = radeon_semaphore_create(rdev, &sem); |
if (r) { |
DRM_ERROR("radeon: moving bo (%d).\n", r); |
return r; |
} |
|
size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4; |
num_loops = DIV_ROUND_UP(size_in_dw, 0xfffff); |
r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11); |
if (r) { |
DRM_ERROR("radeon: moving bo (%d).\n", r); |
radeon_semaphore_free(rdev, &sem, NULL); |
return r; |
} |
|
if (radeon_fence_need_sync(*fence, ring->idx)) { |
radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring, |
ring->idx); |
radeon_fence_note_sync(*fence, ring->idx); |
} else { |
radeon_semaphore_free(rdev, &sem, NULL); |
} |
|
for (i = 0; i < num_loops; i++) { |
cur_size_in_dw = size_in_dw; |
if (cur_size_in_dw > 0xFFFFF) |
cur_size_in_dw = 0xFFFFF; |
size_in_dw -= cur_size_in_dw; |
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw)); |
radeon_ring_write(ring, dst_offset & 0xfffffffc); |
radeon_ring_write(ring, src_offset & 0xfffffffc); |
radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff); |
radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff); |
src_offset += cur_size_in_dw * 4; |
dst_offset += cur_size_in_dw * 4; |
} |
|
r = radeon_fence_emit(rdev, fence, ring->idx); |
if (r) { |
radeon_ring_unlock_undo(rdev, ring); |
return r; |
} |
|
radeon_ring_unlock_commit(rdev, ring); |
radeon_semaphore_free(rdev, &sem, *fence); |
|
return r; |
} |
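/*
 * Worked example for the ring budget above (editorial; the numbers are
 * hypothetical): copying 2048 pages of 4 KiB is
 * (2048 << RADEON_GPU_PAGE_SHIFT) / 4 = 2097152 dwords.  Each COPY
 * packet moves at most 0xFFFFF (1048575) dwords, so num_loops =
 * DIV_ROUND_UP(2097152, 1048575) = 3 packets of 5 dwords each, and
 * radeon_ring_lock() reserves 3 * 5 + 11 dwords; the fixed 11 covers
 * the optional 3-dword semaphore sync plus the 8 dwords that
 * evergreen_dma_fence_ring_emit() writes (fence, trap and HDP flush).
 */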
|
static int evergreen_startup(struct radeon_device *rdev) |
{ |
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; |
@@ -3110,6 +3294,18 @@
if (r) |
return r; |
|
r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX); |
if (r) { |
dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r); |
return r; |
} |
|
r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX); |
if (r) { |
dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r); |
return r; |
} |
|
/* Enable IRQ */ |
r = r600_irq_init(rdev); |
if (r) { |
@@ -3124,6 +3320,14 @@
0, 0xfffff, RADEON_CP_PACKET2); |
if (r) |
return r; |
|
ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; |
r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET, |
DMA_RB_RPTR, DMA_RB_WPTR, |
2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0)); |
if (r) |
return r; |
|
r = evergreen_cp_load_microcode(rdev); |
if (r) |
return r; |
@@ -3130,7 +3334,16 @@
r = evergreen_cp_resume(rdev); |
if (r) |
return r; |
r = r600_dma_resume(rdev); |
if (r) |
return r; |
|
r = radeon_ib_pool_init(rdev); |
if (r) { |
dev_err(rdev->dev, "IB initialization failed (%d).\n", r); |
return r; |
} |
|
return 0; |
} |
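/*
 * Editorial note on the bring-up order above: fence writeback slots for
 * both rings are started before IRQs are enabled, both rings are
 * initialized before the CP microcode is loaded, and the IB pool comes
 * up last, once the GFX and DMA rings are live.  The DMA ring uses the
 * R600_WB_DMA_RPTR_OFFSET writeback slot and the DMA_RB_RPTR/DMA_RB_WPTR
 * registers; judging from the shift of 2 and the 0x3fffc mask passed to
 * radeon_ring_init(), the hardware keeps the ring pointer in bits
 * [17:2], and a DMA NOP packet serves as the ring padding value.
 */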
|
@@ -3229,6 +3442,9 @@
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL; |
r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024); |
|
rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL; |
r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024); |
|
rdev->ih.ring_obj = NULL; |
r600_ih_ring_init(rdev, 64 * 1024); |
|