Subversion Repositories: Kolibri OS

Compare Revisions: Rev 3764 → Rev 5078

/drivers/video/drm/radeon/rv770.c
744,10 → 744,10
(const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers));
radeon_program_register_sequence(rdev,
rv730_golden_registers,
-(const u32)ARRAY_SIZE(rv770_golden_registers));
+(const u32)ARRAY_SIZE(rv730_golden_registers));
radeon_program_register_sequence(rdev,
rv730_mgcg_init,
-(const u32)ARRAY_SIZE(rv770_mgcg_init));
+(const u32)ARRAY_SIZE(rv730_mgcg_init));
break;
case CHIP_RV710:
radeon_program_register_sequence(rdev,
758,18 → 758,18
(const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers));
radeon_program_register_sequence(rdev,
rv710_golden_registers,
-(const u32)ARRAY_SIZE(rv770_golden_registers));
+(const u32)ARRAY_SIZE(rv710_golden_registers));
radeon_program_register_sequence(rdev,
rv710_mgcg_init,
-(const u32)ARRAY_SIZE(rv770_mgcg_init));
+(const u32)ARRAY_SIZE(rv710_mgcg_init));
break;
case CHIP_RV740:
radeon_program_register_sequence(rdev,
rv740_golden_registers,
-(const u32)ARRAY_SIZE(rv770_golden_registers));
+(const u32)ARRAY_SIZE(rv740_golden_registers));
radeon_program_register_sequence(rdev,
rv740_mgcg_init,
-(const u32)ARRAY_SIZE(rv770_mgcg_init));
+(const u32)ARRAY_SIZE(rv740_mgcg_init));
break;
default:
break;
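The pairs above are a copy-paste fix: the left-hand lines sized each rv730/rv710/rv740 table with ARRAY_SIZE of the corresponding rv770 table, so radeon_program_register_sequence() could receive a length that does not match the array it is handed. A minimal standalone sketch of the hazard (the table names below are hypothetical):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Hypothetical register tables of different lengths. */
static const unsigned int rv770_regs[6] = { 0 };
static const unsigned int rv730_regs[4] = { 0 };

int main(void)
{
	/* Wrong: pairs the rv730 table with the rv770 table's length; a
	 * consumer walking 6 entries reads past the end of rv730_regs. */
	printf("mismatched length: %zu\n", ARRAY_SIZE(rv770_regs));

	/* Right: the length is always taken from the array it describes. */
	printf("matching length:   %zu\n", ARRAY_SIZE(rv730_regs));
	return 0;
}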
801,7 → 801,7
return reference_clock;
}
 
-u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
+void rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
835,9 → 835,15
/* Unlock the lock, so double-buffering can take place inside vblank */
tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK;
WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
+}
+
+bool rv770_page_flip_pending(struct radeon_device *rdev, int crtc_id)
+{
+struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
+
/* Return current update_pending status: */
-return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING;
+return !!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) &
+AVIVO_D1GRPH_SURFACE_UPDATE_PENDING);
}
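rv770_page_flip previously programmed the new scanout base and returned the raw pending bits in one call; the new revision makes the flip itself void and adds rv770_page_flip_pending(), with the !! collapsing the register bit into a clean bool. A hedged sketch of how the split API can be consumed (the polling caller below is illustrative, not the driver's actual user):

static void example_flip_and_wait(struct radeon_device *rdev,
				  int crtc_id, u64 new_crtc_base)
{
	unsigned int i;

	/* Program the new base address for this CRTC. */
	rv770_page_flip(rdev, crtc_id, new_crtc_base);

	/* Poll until the hardware has latched the surface update. */
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (!rv770_page_flip_pending(rdev, crtc_id))
			break;
		udelay(1);
	}
}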
 
/* get temperature in millidegrees */
894,7 → 900,6
r = radeon_gart_table_vram_pin(rdev);
if (r)
return r;
-radeon_gart_restore(rdev);
/* Setup L2 cache */
WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
1071,7 → 1076,8
*/
void r700_cp_stop(struct radeon_device *rdev)
{
-// radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
+radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
WREG32(SCRATCH_UMSK, 0);
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
1115,7 → 1121,35
return 0;
}
 
+void rv770_set_clk_bypass_mode(struct radeon_device *rdev)
+{
+u32 tmp, i;
+
+if (rdev->flags & RADEON_IS_IGP)
+return;
+
+tmp = RREG32(CG_SPLL_FUNC_CNTL_2);
+tmp &= SCLK_MUX_SEL_MASK;
+tmp |= SCLK_MUX_SEL(1) | SCLK_MUX_UPDATE;
+WREG32(CG_SPLL_FUNC_CNTL_2, tmp);
+
+for (i = 0; i < rdev->usec_timeout; i++) {
+if (RREG32(CG_SPLL_STATUS) & SPLL_CHG_STATUS)
+break;
+udelay(1);
+}
+
+tmp &= ~SCLK_MUX_UPDATE;
+WREG32(CG_SPLL_FUNC_CNTL_2, tmp);
+
+tmp = RREG32(MPLL_CNTL_MODE);
+if ((rdev->family == CHIP_RV710) || (rdev->family == CHIP_RV730))
+tmp &= ~RV730_MPLL_MCLK_SEL;
+else
+tmp &= ~MPLL_MCLK_SEL;
+WREG32(MPLL_CNTL_MODE, tmp);
+}
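rv770_set_clk_bypass_mode() is new in the right-hand revision: it selects a new SPLL source through the mux (SCLK_MUX_SEL(1) | SCLK_MUX_UPDATE), polls CG_SPLL_STATUS for SPLL_CHG_STATUS with a busy-wait bounded by usec_timeout, clears the update trigger, and then drops the MPLL MCLK select bit (RV710/RV730 use a different bit). The write/poll/acknowledge shape is generic; a self-contained sketch with hypothetical MMIO registers:

#include <stdint.h>
#include <stdbool.h>

/* Generic "request change, poll status, clear trigger" sequence, mirroring
 * the structure above. All registers here are hypothetical MMIO words. */
static bool mux_switch(volatile uint32_t *cntl, volatile uint32_t *status,
                       uint32_t sel_bits, uint32_t update_bit,
                       uint32_t done_bit, unsigned int timeout_us)
{
	uint32_t tmp = *cntl;
	unsigned int i;

	tmp |= sel_bits | update_bit;    /* select new source, raise trigger */
	*cntl = tmp;

	for (i = 0; i < timeout_us; i++) {
		if (*status & done_bit)  /* hardware acknowledged the switch */
			break;
		/* the driver uses udelay(1) here */
	}

	tmp &= ~update_bit;              /* acknowledge by clearing trigger */
	*cntl = tmp;

	return i < timeout_us;           /* false on timeout */
}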
 
/*
* Core functions
*/
1135,7 → 1169,6
u32 hdp_host_path_cntl;
u32 sq_dyn_gpr_size_simd_ab_0;
u32 gb_tiling_config = 0;
-u32 cc_rb_backend_disable = 0;
u32 cc_gc_shader_pipe_config = 0;
u32 mc_arb_ramcfg;
u32 db_debug4, tmp;
1269,21 → 1302,10
WREG32(SPI_CONFIG_CNTL, 0);
}
 
-cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
-tmp = R7XX_MAX_BACKENDS - r600_count_pipe_bits(cc_rb_backend_disable >> 16);
-if (tmp < rdev->config.rv770.max_backends) {
-rdev->config.rv770.max_backends = tmp;
-}
-
cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
-tmp = R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R7XX_MAX_PIPES_MASK);
-if (tmp < rdev->config.rv770.max_pipes) {
-rdev->config.rv770.max_pipes = tmp;
-}
-tmp = R7XX_MAX_SIMDS - r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R7XX_MAX_SIMDS_MASK);
-if (tmp < rdev->config.rv770.max_simds) {
-rdev->config.rv770.max_simds = tmp;
-}
+tmp = rdev->config.rv770.max_simds -
+r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R7XX_MAX_SIMDS_MASK);
+rdev->config.rv770.active_simds = tmp;
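The removed block clamped max_backends/max_pipes/max_simds downward from the fuse registers; the new lines instead record active_simds. r600_count_pipe_bits() is effectively a population count over the disable mask, so the computation is max_simds minus the number of SIMDs fused off. Standalone sketch (the compiler builtin stands in for r600_count_pipe_bits(), and the 16-bit field width is an assumption):

static unsigned int active_simds(unsigned int max_simds,
				 unsigned int cc_gc_shader_pipe_config)
{
	/* Bits 16 and up of CC_GC_SHADER_PIPE_CONFIG flag disabled SIMDs. */
	unsigned int disabled = (cc_gc_shader_pipe_config >> 16) & 0xffff;

	return max_simds - __builtin_popcount(disabled);
}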
 
switch (rdev->config.rv770.max_tile_pipes) {
case 1:
1303,6 → 1325,14
rdev->config.rv770.tiling_npipes = rdev->config.rv770.max_tile_pipes;
 
disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R7XX_MAX_BACKENDS_MASK;
+tmp = 0;
+for (i = 0; i < rdev->config.rv770.max_backends; i++)
+tmp |= (1 << i);
+/* if all the backends are disabled, fix it up here */
+if ((disabled_rb_mask & tmp) == tmp) {
+for (i = 0; i < rdev->config.rv770.max_backends; i++)
+disabled_rb_mask &= ~(1 << i);
+}
tmp = (gb_tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.rv770.max_backends,
R7XX_MAX_BACKENDS, disabled_rb_mask);
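The eight added lines guard the remap call just above: they build a mask of the backends the driver intends to use and, if the fuses report every one of them disabled, clear those bits so r6xx_remap_render_backend() cannot be handed a configuration with zero usable render backends. Equivalent standalone form (illustrative only):

static unsigned int fixup_disabled_rb_mask(unsigned int disabled_rb_mask,
					   unsigned int max_backends)
{
	unsigned int wanted = (1u << max_backends) - 1;

	/* if all the backends are disabled, fix it up here */
	if ((disabled_rb_mask & wanted) == wanted)
		disabled_rb_mask &= ~wanted;

	return disabled_rb_mask;
}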
1643,80 → 1673,6
return 0;
}
 
-/**
-* rv770_copy_dma - copy pages using the DMA engine
-*
-* @rdev: radeon_device pointer
-* @src_offset: src GPU address
-* @dst_offset: dst GPU address
-* @num_gpu_pages: number of GPU pages to xfer
-* @fence: radeon fence object
-*
-* Copy GPU paging using the DMA engine (r7xx).
-* Used by the radeon ttm implementation to move pages if
-* registered as the asic copy callback.
-*/
-int rv770_copy_dma(struct radeon_device *rdev,
-uint64_t src_offset, uint64_t dst_offset,
-unsigned num_gpu_pages,
-struct radeon_fence **fence)
-{
-struct radeon_semaphore *sem = NULL;
-int ring_index = rdev->asic->copy.dma_ring_index;
-struct radeon_ring *ring = &rdev->ring[ring_index];
-u32 size_in_dw, cur_size_in_dw;
-int i, num_loops;
-int r = 0;
-
-r = radeon_semaphore_create(rdev, &sem);
-if (r) {
-DRM_ERROR("radeon: moving bo (%d).\n", r);
-return r;
-}
-
-size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
-num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFF);
-r = radeon_ring_lock(rdev, ring, num_loops * 5 + 8);
-if (r) {
-DRM_ERROR("radeon: moving bo (%d).\n", r);
-radeon_semaphore_free(rdev, &sem, NULL);
-return r;
-}
-
-if (radeon_fence_need_sync(*fence, ring->idx)) {
-radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
-ring->idx);
-radeon_fence_note_sync(*fence, ring->idx);
-} else {
-radeon_semaphore_free(rdev, &sem, NULL);
-}
-
-for (i = 0; i < num_loops; i++) {
-cur_size_in_dw = size_in_dw;
-if (cur_size_in_dw > 0xFFFF)
-cur_size_in_dw = 0xFFFF;
-size_in_dw -= cur_size_in_dw;
-radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
-radeon_ring_write(ring, dst_offset & 0xfffffffc);
-radeon_ring_write(ring, src_offset & 0xfffffffc);
-radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
-radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
-src_offset += cur_size_in_dw * 4;
-dst_offset += cur_size_in_dw * 4;
-}
-
-r = radeon_fence_emit(rdev, fence, ring->idx);
-if (r) {
-radeon_ring_unlock_undo(rdev, ring);
-return r;
-}
-
-radeon_ring_unlock_commit(rdev, ring);
-radeon_semaphore_free(rdev, &sem, *fence);
-
-return r;
-}
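rv770_copy_dma() disappears from rv770.c wholesale; this tracks upstream Linux, where the r7xx DMA copy path was moved into its own file (rv770_dma.c), so the deletion is code motion rather than lost functionality. For reference, the chunking arithmetic the function used: each DMA_PACKET_COPY moves at most 0xFFFF dwords and costs 5 ring dwords, plus 8 dwords of fence/semaphore overhead per transfer. A worked example:

#include <stdio.h>

int main(void)
{
	/* 1024 GPU pages of 4 KiB each (RADEON_GPU_PAGE_SHIFT is 12). */
	unsigned int num_gpu_pages = 1024;
	unsigned int size_in_dw = (num_gpu_pages << 12) / 4;         /* 1048576 dwords */
	unsigned int num_loops = (size_in_dw + 0xFFFF - 1) / 0xFFFF; /* DIV_ROUND_UP */

	/* Prints: loops=17, ring space=93 dwords */
	printf("loops=%u, ring space=%u dwords\n",
	       num_loops, num_loops * 5 + 8);
	return 0;
}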
 
static int rv770_startup(struct radeon_device *rdev)
{
struct radeon_ring *ring;
1725,19 → 1681,13
/* enable pcie gen2 link */
rv770_pcie_gen2_enable(rdev);
 
-if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
-r = r600_init_microcode(rdev);
-if (r) {
-DRM_ERROR("Failed to load firmware!\n");
-return r;
-}
-}
-
+/* scratch needs to be initialized before MC */
r = r600_vram_scratch_init(rdev);
if (r)
return r;
 
rv770_mc_program(rdev);
 
if (rdev->flags & RADEON_IS_AGP) {
rv770_agp_enable(rdev);
} else {
1747,12 → 1697,6
}
 
rv770_gpu_init(rdev);
-r = r600_blit_init(rdev);
-if (r) {
-r600_blit_fini(rdev);
-rdev->asic->copy.copy = NULL;
-dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
-}
 
/* allocate wb buffer */
r = radeon_wb_init(rdev);
1799,15 → 1743,13
 
ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
-R600_CP_RB_RPTR, R600_CP_RB_WPTR,
-0, 0xfffff, RADEON_CP_PACKET2);
+RADEON_CP_PACKET2);
if (r)
return r;
 
ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
-DMA_RB_RPTR, DMA_RB_WPTR,
-2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
if (r)
return r;
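Both radeon_ring_init() calls lose their rptr/wptr register arguments and pointer mask; only the ring, its size, the rptr save slot, and the nop packet remain. This matches upstream moving ring-pointer handling behind per-ring callbacks, so the register plumbing no longer has to be repeated at every init site. Condensed shape of the change, taken from the lines above:

/* old: radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
 *                       R600_CP_RB_RPTR, R600_CP_RB_WPTR,
 *                       0, 0xfffff, RADEON_CP_PACKET2);
 * new: the pointer registers come from the ring definition itself. */
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
		     RADEON_CP_PACKET2);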
 
1841,6 → 1783,11
return r;
}
 
+r = r600_audio_init(rdev);
+if (r) {
+DRM_ERROR("radeon: audio init failed\n");
+return r;
+}
+
return 0;
}
1909,6 → 1856,17
if (r)
return r;
 
+if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+r = r600_init_microcode(rdev);
+if (r) {
+DRM_ERROR("Failed to load firmware!\n");
+return r;
+}
+}
+
+/* Initialize power management */
+radeon_pm_init(rdev);
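Firmware loading moves out of rv770_startup(), which also runs on resume, into one-time init, and radeon_pm_init() is added alongside it; startup now assumes rdev->me_fw/pfp_fw/rlc_fw are already populated. A condensed, illustrative sketch of the resulting ordering (not the literal function bodies):

static int example_rv770_bringup(struct radeon_device *rdev)
{
	int r;

	/* One-time: request microcode images (previously done in startup). */
	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
		r = r600_init_microcode(rdev);
		if (r)
			return r;
	}

	/* Power management setup, likewise one-time. */
	radeon_pm_init(rdev);

	/* Hardware programming; this path reruns on every resume. */
	return rv770_startup(rdev);
}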
 
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);