27,6 → 27,7
 #include <drm/drmP.h>
 #include "radeon.h"
 #include "radeon_asic.h"
+#include "radeon_audio.h"
 #include <drm/radeon_drm.h>
 #include "nid.h"
 #include "atom.h"
35,6 → 36,31
 #include "radeon_ucode.h"
 #include "clearstate_cayman.h"
 
+/*
+ * Indirect registers accessor
+ */
+u32 tn_smc_rreg(struct radeon_device *rdev, u32 reg)
+{
+	unsigned long flags;
+	u32 r;
+
+	spin_lock_irqsave(&rdev->smc_idx_lock, flags);
+	WREG32(TN_SMC_IND_INDEX_0, (reg));
+	r = RREG32(TN_SMC_IND_DATA_0);
+	spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
+	return r;
+}
+
+void tn_smc_wreg(struct radeon_device *rdev, u32 reg, u32 v)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&rdev->smc_idx_lock, flags);
+	WREG32(TN_SMC_IND_INDEX_0, (reg));
+	WREG32(TN_SMC_IND_DATA_0, (v));
+	spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
+}
+
 static const u32 tn_rlc_save_restore_register_list[] =
 {
 	0x98fc,
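
These accessors follow the usual index/data indirect-register pattern: the target SMC register's offset is written to TN_SMC_IND_INDEX_0, after which TN_SMC_IND_DATA_0 aliases that register for the actual read or write. The smc_idx_lock spinlock keeps the index/data pair atomic, so a concurrent accessor cannot retarget the window between the two MMIO operations; the irqsave variant presumably allows the helpers to be used from interrupt context as well. Elsewhere they sit behind the driver's RREG32_SMC()/WREG32_SMC() helpers; tn_get_temp() below reads TN_CURRENT_GNB_TEMP that way.
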
827,6 → 853,35
 	return err;
 }
 
+/**
+ * cayman_get_allowed_info_register - fetch the register for the info ioctl
+ *
+ * @rdev: radeon_device pointer
+ * @reg: register offset in bytes
+ * @val: register value
+ *
+ * Returns 0 for success or -EINVAL for an invalid register
+ *
+ */
+int cayman_get_allowed_info_register(struct radeon_device *rdev,
+				     u32 reg, u32 *val)
+{
+	switch (reg) {
+	case GRBM_STATUS:
+	case GRBM_STATUS_SE0:
+	case GRBM_STATUS_SE1:
+	case SRBM_STATUS:
+	case SRBM_STATUS2:
+	case (DMA_STATUS_REG + DMA0_REGISTER_OFFSET):
+	case (DMA_STATUS_REG + DMA1_REGISTER_OFFSET):
+	case UVD_STATUS:
+		*val = RREG32(reg);
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
 int tn_get_temp(struct radeon_device *rdev)
 {
 	u32 temp = RREG32_SMC(TN_CURRENT_GNB_TEMP) & 0x7ff;
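
cayman_get_allowed_info_register() is the Cayman/Aruba backend for the register-read info query: userspace hands in a register offset and the driver honours it only if the offset is on this whitelist of harmless read-only status registers (GRBM/SRBM status, the status of both DMA engines, UVD status); everything else gets -EINVAL. A minimal userspace sketch of how this is presumably reached, assuming the RADEON_INFO_READ_REG request from radeon_drm.h is the consumer of this callback, that card0 is the radeon node, and taking GRBM_STATUS's 0x8010 offset from the NI register headers:

#include <stdint.h>
#include <stdio.h>
#include <fcntl.h>
#include <xf86drm.h>
#include <radeon_drm.h>

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);	/* assumed radeon node */
	uint32_t reg = 0x8010;				/* GRBM_STATUS (assumed offset) */
	struct drm_radeon_info info = {
		.request = RADEON_INFO_READ_REG,
		.value = (uint64_t)(uintptr_t)&reg,	/* in: offset, out: value */
	};

	if (fd < 0 || drmCommandWriteRead(fd, DRM_RADEON_INFO,
					  &info, sizeof(info)))
		return 1;
	printf("GRBM_STATUS = 0x%08x\n", reg);
	return 0;
}
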
961,6 → 1016,8
 	}
 
 	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
+	WREG32(SRBM_INT_CNTL, 0x1);
+	WREG32(SRBM_INT_ACK, 0x1);
 
 	evergreen_fix_pci_max_read_req_size(rdev);
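
The two added writes poke the SRBM interrupt control and acknowledge registers during GPU init; presumably this enables the SRBM interrupt source and clears any stale acknowledgement left over from before the re-init, so errant SRBM register accesses get reported.
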
1085,12 → 1142,12
 
 	if ((rdev->config.cayman.max_backends_per_se == 1) &&
 	    (rdev->flags & RADEON_IS_IGP)) {
-		if ((disabled_rb_mask & 3) == 1) {
+		if ((disabled_rb_mask & 3) == 2) {
+			/* RB1 disabled, RB0 enabled */
+			tmp = 0x00000000;
+		} else {
 			/* RB0 disabled, RB1 enabled */
 			tmp = 0x11111111;
-		} else {
-			/* RB1 disabled, RB0 enabled */
-			tmp = 0x00000000;
 		}
 	} else {
 		tmp = gb_addr_config & NUM_PIPES_MASK;
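
In disabled_rb_mask, bit 0 stands for RB0 and bit 1 for RB1, and each nibble of tmp names the render backend a screen tile is routed to, so 0x00000000 sends everything to RB0 and 0x11111111 to RB1. The rewritten test keys on (disabled_rb_mask & 3) == 2, i.e. RB1 explicitly harvested, and only then picks the RB0 map; any other fuse pattern on these single-backend IGPs now falls through to the RB1 map, inverting the old default, which presumably matches how these parts are actually fused.
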
1269,7 → 1326,8
 	 */
 	for (i = 1; i < 8; i++) {
 		WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (i << 2), 0);
-		WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2), rdev->vm_manager.max_pfn);
+		WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2),
+		       rdev->vm_manager.max_pfn - 1);
 		WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
 		       rdev->vm_manager.saved_table_addr[i]);
 	}
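
The END_ADDR registers take an inclusive last page-frame number, so writing max_pfn pointed each VM context one page past what the page table actually covers; max_pfn - 1 is the last valid PFN. The same off-by-one was fixed across the other radeon VM setup paths at the time.
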
1328,6 → 1386,13
 	radeon_gart_table_vram_unpin(rdev);
 }
 
+static void cayman_pcie_gart_fini(struct radeon_device *rdev)
+{
+	cayman_pcie_gart_disable(rdev);
+	radeon_gart_table_vram_free(rdev);
+	radeon_gart_fini(rdev);
+}
+
 void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
 			      int ring, u32 cp_int_cntl)
 {
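
cayman_pcie_gart_fini() undoes the GART bring-up in reverse order: disable translation, free the VRAM-resident table, then drop the common GART state. It is used by the cayman_init() error path and the cayman_fini() unload path added further down.
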
1554,6 → 1619,13
 	return 0;
 }
 
+static void cayman_cp_fini(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	cayman_cp_enable(rdev, false);
+	radeon_ring_fini(rdev, ring);
+	radeon_scratch_free(rdev, ring->rptr_save_reg);
+}
 
 static int cayman_cp_resume(struct radeon_device *rdev)
 {
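
Likewise, cayman_cp_fini() halts the command processor before freeing the GFX ring and the scratch register used for read-pointer writeback, mirroring the bring-up order in cayman_cp_resume().
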
1984,16 → 2056,35
 		return r;
 	}
 
-//	r = rv770_uvd_resume(rdev);
-//	if (!r) {
-//		r = radeon_fence_driver_start_ring(rdev,
-//						   R600_RING_TYPE_UVD_INDEX);
-//		if (r)
-//			dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
-//	}
-//	if (r)
-//		rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
+	r = uvd_v2_2_resume(rdev);
+	if (!r) {
+		r = radeon_fence_driver_start_ring(rdev,
+						   R600_RING_TYPE_UVD_INDEX);
+		if (r)
+			dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
+	}
+	if (r)
+		rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
+
+	if (rdev->family == CHIP_ARUBA) {
+		r = radeon_vce_resume(rdev);
+		if (!r)
+			r = vce_v1_0_resume(rdev);
+
+		if (!r)
+			r = radeon_fence_driver_start_ring(rdev,
+							   TN_RING_TYPE_VCE1_INDEX);
+		if (!r)
+			r = radeon_fence_driver_start_ring(rdev,
+							   TN_RING_TYPE_VCE2_INDEX);
+
+		if (r) {
+			dev_err(rdev->dev, "VCE init error (%d).\n", r);
+			rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size = 0;
+			rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_size = 0;
+		}
+	}
 
 	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
 	if (r) {
 		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
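
The long-commented-out UVD bring-up returns using the current entry point (uvd_v2_2_resume() superseded the old rv770_uvd_resume() when the UVD code was split out per generation), and VCE bring-up is added for Aruba, the only chip in this family with a VCE block. Note the soft-failure convention: when resume or fence setup fails, the affected ring's ring_size is zeroed instead of failing cayman_startup(), and later stages skip rings whose size is zero, so the GPU still comes up without the encode/decode engines.
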
2028,7 → 2119,7
 	r = r600_irq_init(rdev);
 	if (r) {
 		DRM_ERROR("radeon: IH init failed (%d).\n", r);
-//		radeon_irq_kms_fini(rdev);
+		radeon_irq_kms_fini(rdev);
 		return r;
 	}
 	evergreen_irq_set(rdev);
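
Re-enabling radeon_irq_kms_fini() on the IH-init failure path plugs a leak: the KMS interrupt state set up earlier in startup was never torn down when r600_irq_init() failed.
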
2061,7 → 2152,31
 	if (r)
 		return r;
 
+	ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+	if (ring->ring_size) {
+		r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
+				     RADEON_CP_PACKET2);
+		if (!r)
+			r = uvd_v1_0_init(rdev);
+		if (r)
+			DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
+	}
+
+	if (rdev->family == CHIP_ARUBA) {
+		ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
+		if (ring->ring_size)
+			r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0);
+
+		ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
+		if (ring->ring_size)
+			r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0);
+
+		if (!r)
+			r = vce_v1_0_init(rdev);
+		if (r)
+			DRM_ERROR("radeon: failed initializing VCE (%d).\n", r);
+	}
+
 	r = radeon_ib_pool_init(rdev);
 	if (r) {
 		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
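
This is the consumer side of the ring_size convention noted above: the UVD and VCE rings are only initialized when an earlier stage left ring_size non-zero. UVD uses RADEON_CP_PACKET2 as its ring nop while the VCE rings pass 0x0, the VCE no-op value; failures are again logged rather than propagated.
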
2169,13 → 2284,26
 	ring->ring_obj = NULL;
 	r600_ring_init(rdev, ring, 64 * 1024);
 
-//	r = radeon_uvd_init(rdev);
-//	if (!r) {
-//		ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
-//		ring->ring_obj = NULL;
-//		r600_ring_init(rdev, ring, 4096);
-//	}
+	r = radeon_uvd_init(rdev);
+	if (!r) {
+		ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+		ring->ring_obj = NULL;
+		r600_ring_init(rdev, ring, 4096);
+	}
+
+	if (rdev->family == CHIP_ARUBA) {
+		r = radeon_vce_init(rdev);
+		if (!r) {
+			ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
+			ring->ring_obj = NULL;
+			r600_ring_init(rdev, ring, 4096);
+
+			ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
+			ring->ring_obj = NULL;
+			r600_ring_init(rdev, ring, 4096);
+		}
+	}
 
 	rdev->ih.ring_obj = NULL;
 	r600_ih_ring_init(rdev, 64 * 1024);
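
In cayman_init() the UVD software state (firmware load plus BO allocation in radeon_uvd_init()) is created unconditionally for the family, while the two VCE rings are declared only on Aruba. The 4096-byte ring size is tiny next to the 64 KB DMA rings above, presumably because UVD/VCE submissions are just small command messages.
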
2187,6 → 2315,16
 	r = cayman_startup(rdev);
 	if (r) {
 		dev_err(rdev->dev, "disabling GPU acceleration\n");
+		cayman_cp_fini(rdev);
+		cayman_dma_fini(rdev);
+		r600_irq_fini(rdev);
+		if (rdev->flags & RADEON_IS_IGP)
+			sumo_rlc_fini(rdev);
+		radeon_wb_fini(rdev);
+		radeon_ib_pool_fini(rdev);
+		radeon_vm_manager_fini(rdev);
+		radeon_irq_kms_fini(rdev);
+		cayman_pcie_gart_fini(rdev);
 		rdev->accel_working = false;
 	}
 
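
Previously a failed cayman_startup() only cleared accel_working and leaked everything the partial bring-up had allocated. The error path now unwinds in roughly the reverse order of initialization, using the fini helpers introduced above, before acceleration is declared dead.
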
2205,6 → 2343,32
 	return 0;
 }
 
+void cayman_fini(struct radeon_device *rdev)
+{
+	radeon_pm_fini(rdev);
+	cayman_cp_fini(rdev);
+	cayman_dma_fini(rdev);
+	r600_irq_fini(rdev);
+	if (rdev->flags & RADEON_IS_IGP)
+		sumo_rlc_fini(rdev);
+	radeon_wb_fini(rdev);
+	radeon_vm_manager_fini(rdev);
+	radeon_ib_pool_fini(rdev);
+	radeon_irq_kms_fini(rdev);
+	uvd_v1_0_fini(rdev);
+	radeon_uvd_fini(rdev);
+	if (rdev->family == CHIP_ARUBA)
+		radeon_vce_fini(rdev);
+	cayman_pcie_gart_fini(rdev);
+	r600_vram_scratch_fini(rdev);
+	radeon_gem_fini(rdev);
+	radeon_fence_driver_fini(rdev);
+	radeon_bo_fini(rdev);
+	radeon_atombios_fini(rdev);
+	kfree(rdev->bios);
+	rdev->bios = NULL;
+}
+
 /*
  * vm
  */
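
cayman_fini() is the full unload path: engines first (CP, DMA, IRQ, and the RLC on IGPs), then the memory-management layers (writeback, VM manager, IB pool), then the multimedia blocks (UVD always, VCE only on Aruba), and finally GART, the VRAM scratch page, GEM, the fence driver, the BO layer, the ATOM BIOS state, and the kernel's copy of the video BIOS. Teardown broadly mirrors bring-up in reverse so nothing is freed while an engine could still touch it.
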
2409,7 → 2573,48
 	radeon_ring_write(ring, PACKET0(VM_INVALIDATE_REQUEST, 0));
 	radeon_ring_write(ring, 1 << vm_id);
+
+	/* wait for the invalidate to complete */
+	radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+	radeon_ring_write(ring, (WAIT_REG_MEM_FUNCTION(0) |  /* always */
+				 WAIT_REG_MEM_ENGINE(0)));   /* me */
+	radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0); /* ref */
+	radeon_ring_write(ring, 0); /* mask */
+	radeon_ring_write(ring, 0x20); /* poll interval */
+
 	/* sync PFP to ME, otherwise we might get invalid PFP reads */
 	radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
 	radeon_ring_write(ring, 0x0);
 }
 
+int tn_set_vce_clocks(struct radeon_device *rdev, u32 evclk, u32 ecclk)
+{
+	struct atom_clock_dividers dividers;
+	int r, i;
+
+	r = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+					   ecclk, false, &dividers);
+	if (r)
+		return r;
+
+	for (i = 0; i < 100; i++) {
+		if (RREG32(CG_ECLK_STATUS) & ECLK_STATUS)
+			break;
+		mdelay(10);
+	}
+	if (i == 100)
+		return -ETIMEDOUT;
+
+	WREG32_P(CG_ECLK_CNTL, dividers.post_div, ~(ECLK_DIR_CNTL_EN|ECLK_DIVIDER_MASK));
+
+	for (i = 0; i < 100; i++) {
+		if (RREG32(CG_ECLK_STATUS) & ECLK_STATUS)
+			break;
+		mdelay(10);
+	}
+	if (i == 100)
+		return -ETIMEDOUT;
+
+	return 0;
+}
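
Two additions close out the section. cayman_vm_flush() now emits a WAIT_REG_MEM packet on VM_INVALIDATE_REQUEST (to wait for the invalidate to complete, per the in-code comment) before the PFP_SYNC_ME, so the prefetch parser cannot run ahead with stale translations. tn_set_vce_clocks() then programs the Aruba VCE clock: ask the ATOM tables for the divider that yields the requested ecclk, poll CG_ECLK_STATUS (up to 100 tries, 10 ms apart) until the clock reports stable, write the post-divider into CG_ECLK_CNTL, and poll again until it settles. Only ecclk is programmed; the evclk argument is unused here.
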