122,9 → 122,97 |
|
/**
 * r600_set_uvd_clocks - program the UPLL for the requested UVD clocks
 * @rdev: radeon device pointer
 * @vclk: requested UVD video clock (0 powers the PLL down)
 * @dclk: requested UVD decode clock (0 powers the PLL down)
 *
 * Switches VCLK/DCLK onto the bypass clock, reprograms the UPLL
 * feedback/reference/post dividers for the requested rates, then
 * switches the clocks back onto the PLL output.  If either requested
 * clock is zero, the PLL is left in bypass mode and put to sleep.
 *
 * Returns 0 on success, or a negative error code propagated from the
 * divider calculation or the PLL control request.
 */
int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
{
	unsigned fb_div = 0, ref_div, vclk_div = 0, dclk_div = 0;
	int r;

	/* bypass vclk and dclk with bclk */
	WREG32_P(CG_UPLL_FUNC_CNTL_2,
		 VCLK_SRC_SEL(1) | DCLK_SRC_SEL(1),
		 ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));

	/* assert BYPASS_EN, deassert UPLL_RESET, UPLL_SLEEP and UPLL_CTLREQ */
	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~(
		 UPLL_RESET_MASK | UPLL_SLEEP_MASK | UPLL_CTLREQ_MASK));

	/* RS780 and newer have an additional macro-level bypass control */
	if (rdev->family >= CHIP_RS780)
		WREG32_P(GFX_MACRO_BYPASS_CNTL, UPLL_BYPASS_CNTL,
			 ~UPLL_BYPASS_CNTL);

	if (!vclk || !dclk) {
		/* keep the Bypass mode, put PLL to sleep */
		WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
		return 0;
	}

	/*
	 * Pick the reference divider based on the SPLL reference frequency.
	 * NOTE(review): 10000 presumably means 100 MHz in the driver's
	 * 10 kHz clock units — confirm against radeon_clocks setup.
	 */
	if (rdev->clock.spll.reference_freq == 10000)
		ref_div = 34;
	else
		ref_div = 4;

	/* compute fb/vclk/dclk dividers for the requested rates;
	 * 50000/160000 bound the VCO, 0xFFF masks the fb divider,
	 * 2..30 bounds the post divider */
	r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk, 50000, 160000,
					  ref_div + 1, 0xFFF, 2, 30, ~0,
					  &fb_div, &vclk_div, &dclk_div);
	if (r)
		return r;

	/* per-family fb divider encoding adjustment (RV670..pre-RS780
	 * halve it, everything else sets the low bit) — encoding detail
	 * of the UPLL_FB_DIV field on these ASICs */
	if (rdev->family >= CHIP_RV670 && rdev->family < CHIP_RS780)
		fb_div >>= 1;
	else
		fb_div |= 1;

	/* handshake with the PLL before reprogramming it */
	r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
	if (r)
		return r;

	/* assert PLL_RESET */
	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_RESET_MASK, ~UPLL_RESET_MASK);

	/* For RS780 we have to choose ref clk */
	if (rdev->family >= CHIP_RS780)
		WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_REFCLK_SRC_SEL_MASK,
			 ~UPLL_REFCLK_SRC_SEL_MASK);

	/* set the required fb, ref and post divider values */
	WREG32_P(CG_UPLL_FUNC_CNTL,
		 UPLL_FB_DIV(fb_div) |
		 UPLL_REF_DIV(ref_div),
		 ~(UPLL_FB_DIV_MASK | UPLL_REF_DIV_MASK));
	/* split each post divider into hi/lo halves; odd dividers put the
	 * extra cycle into the low phase */
	WREG32_P(CG_UPLL_FUNC_CNTL_2,
		 UPLL_SW_HILEN(vclk_div >> 1) |
		 UPLL_SW_LOLEN((vclk_div >> 1) + (vclk_div & 1)) |
		 UPLL_SW_HILEN2(dclk_div >> 1) |
		 UPLL_SW_LOLEN2((dclk_div >> 1) + (dclk_div & 1)) |
		 UPLL_DIVEN_MASK | UPLL_DIVEN2_MASK,
		 ~UPLL_SW_MASK);

	/* give the PLL some time to settle */
	mdelay(15);

	/* deassert PLL_RESET */
	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);

	mdelay(15);

	/* deassert BYPASS EN */
	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK);

	if (rdev->family >= CHIP_RS780)
		WREG32_P(GFX_MACRO_BYPASS_CNTL, 0, ~UPLL_BYPASS_CNTL);

	/* wait for the PLL to acknowledge the new settings */
	r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
	if (r)
		return r;

	/* switch VCLK and DCLK selection */
	WREG32_P(CG_UPLL_FUNC_CNTL_2,
		 VCLK_SRC_SEL(2) | DCLK_SRC_SEL(2),
		 ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));

	mdelay(100);

	return 0;
}
|
void dce3_program_fmt(struct drm_encoder *encoder) |
{ |
struct drm_device *dev = encoder->dev; |
992,6 → 1080,8 |
WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp); |
WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp); |
WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp); |
WREG32(MC_VM_L1_TLB_MCB_RD_UVD_CNTL, tmp); |
WREG32(MC_VM_L1_TLB_MCB_WR_UVD_CNTL, tmp); |
WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE); |
WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE); |
WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); |
1042,6 → 1132,8 |
WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp); |
WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp); |
WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp); |
WREG32(MC_VM_L1_TLB_MCB_RD_UVD_CNTL, tmp); |
WREG32(MC_VM_L1_TLB_MCB_WR_UVD_CNTL, tmp); |
radeon_gart_table_vram_unpin(rdev); |
} |
|
1338,7 → 1430,7 |
if (rdev->vram_scratch.robj == NULL) { |
r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, |
PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, |
0, NULL, &rdev->vram_scratch.robj); |
0, NULL, NULL, &rdev->vram_scratch.robj); |
if (r) { |
return r; |
} |
2792,12 → 2884,13 |
* Used by the radeon ttm implementation to move pages if |
* registered as the asic copy callback. |
*/ |
int r600_copy_cpdma(struct radeon_device *rdev, |
struct radeon_fence *r600_copy_cpdma(struct radeon_device *rdev, |
uint64_t src_offset, uint64_t dst_offset, |
unsigned num_gpu_pages, |
struct radeon_fence **fence) |
struct reservation_object *resv) |
{ |
struct radeon_semaphore *sem = NULL; |
struct radeon_fence *fence; |
struct radeon_sync sync; |
int ring_index = rdev->asic->copy.blit_ring_index; |
struct radeon_ring *ring = &rdev->ring[ring_index]; |
u32 size_in_bytes, cur_size_in_bytes, tmp; |
2804,11 → 2897,7 |
int i, num_loops; |
int r = 0; |
|
r = radeon_semaphore_create(rdev, &sem); |
if (r) { |
DRM_ERROR("radeon: moving bo (%d).\n", r); |
return r; |
} |
radeon_sync_create(&sync); |
|
size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT); |
num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff); |
2815,12 → 2904,12 |
r = radeon_ring_lock(rdev, ring, num_loops * 6 + 24); |
if (r) { |
DRM_ERROR("radeon: moving bo (%d).\n", r); |
radeon_semaphore_free(rdev, &sem, NULL); |
return r; |
radeon_sync_free(rdev, &sync, NULL); |
return ERR_PTR(r); |
} |
|
radeon_semaphore_sync_to(sem, *fence); |
radeon_semaphore_sync_rings(rdev, sem, ring->idx); |
radeon_sync_resv(rdev, &sync, resv, false); |
radeon_sync_rings(rdev, &sync, ring->idx); |
|
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); |
radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); |
2846,17 → 2935,17 |
radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); |
radeon_ring_write(ring, WAIT_CP_DMA_IDLE_bit); |
|
r = radeon_fence_emit(rdev, fence, ring->idx); |
r = radeon_fence_emit(rdev, &fence, ring->idx); |
if (r) { |
radeon_ring_unlock_undo(rdev, ring); |
radeon_semaphore_free(rdev, &sem, NULL); |
return r; |
radeon_sync_free(rdev, &sync, NULL); |
return ERR_PTR(r); |
} |
|
radeon_ring_unlock_commit(rdev, ring, false); |
radeon_semaphore_free(rdev, &sem, *fence); |
radeon_sync_free(rdev, &sync, fence); |
|
return r; |
return fence; |
} |
|
int r600_set_surface_reg(struct radeon_device *rdev, int reg, |
3171,7 → 3260,7 |
r = radeon_bo_create(rdev, rdev->ih.ring_size, |
PAGE_SIZE, true, |
RADEON_GEM_DOMAIN_GTT, 0, |
NULL, &rdev->ih.ring_obj); |
NULL, NULL, &rdev->ih.ring_obj); |
if (r) { |
DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r); |
return r; |