25,11 → 25,13 |
* Alex Deucher |
* Jerome Glisse |
*/ |
#include <linux/slab.h> |
#include <linux/seq_file.h> |
#include <linux/firmware.h> |
#include "drmP.h" |
#include "radeon_drm.h" |
#include "radeon.h" |
#include "radeon_asic.h" |
#include "radeon_mode.h" |
#include "r600d.h" |
#include "atom.h" |
41,6 → 43,10 |
#define R700_PFP_UCODE_SIZE 848 |
#define R700_PM4_UCODE_SIZE 1360 |
#define R700_RLC_UCODE_SIZE 1024 |
#define EVERGREEN_PFP_UCODE_SIZE 1120 |
#define EVERGREEN_PM4_UCODE_SIZE 1376 |
#define EVERGREEN_RLC_UCODE_SIZE 768 |
#define CAYMAN_RLC_UCODE_SIZE 1024 |
|
/* Firmware Names */ |
MODULE_FIRMWARE("radeon/R600_pfp.bin"); |
65,6 → 71,25 |
MODULE_FIRMWARE("radeon/RV710_me.bin"); |
MODULE_FIRMWARE("radeon/R600_rlc.bin"); |
MODULE_FIRMWARE("radeon/R700_rlc.bin"); |
MODULE_FIRMWARE("radeon/CEDAR_pfp.bin"); |
MODULE_FIRMWARE("radeon/CEDAR_me.bin"); |
MODULE_FIRMWARE("radeon/CEDAR_rlc.bin"); |
MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin"); |
MODULE_FIRMWARE("radeon/REDWOOD_me.bin"); |
MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin"); |
MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin"); |
MODULE_FIRMWARE("radeon/JUNIPER_me.bin"); |
MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin"); |
MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin"); |
MODULE_FIRMWARE("radeon/CYPRESS_me.bin"); |
MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin"); |
MODULE_FIRMWARE("radeon/PALM_pfp.bin"); |
MODULE_FIRMWARE("radeon/PALM_me.bin"); |
MODULE_FIRMWARE("radeon/SUMO_rlc.bin"); |
MODULE_FIRMWARE("radeon/SUMO_pfp.bin"); |
MODULE_FIRMWARE("radeon/SUMO_me.bin"); |
MODULE_FIRMWARE("radeon/SUMO2_pfp.bin"); |
MODULE_FIRMWARE("radeon/SUMO2_me.bin"); |
|
int r600_debugfs_mc_info_init(struct radeon_device *rdev); |
|
72,7 → 97,35 |
int r600_mc_wait_for_idle(struct radeon_device *rdev); |
void r600_gpu_init(struct radeon_device *rdev); |
void r600_fini(struct radeon_device *rdev); |
void r600_irq_disable(struct radeon_device *rdev); |
static void r600_pcie_gen2_enable(struct radeon_device *rdev); |
|
/* get temperature in millidegrees */ |
int rv6xx_get_temp(struct radeon_device *rdev) |
{ |
u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >> |
ASIC_T_SHIFT; |
int actual_temp = temp & 0xff; |
|
if (temp & 0x100) |
actual_temp -= 256; |
|
return actual_temp * 1000; |
} |
|
|
|
|
|
|
bool r600_gui_idle(struct radeon_device *rdev) |
{ |
if (RREG32(GRBM_STATUS) & GUI_ACTIVE) |
return false; |
else |
return true; |
} |
|
/* hpd for digital panel detect/disconnect */ |
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd) |
{ |
358,6 → 411,19 |
u32 tmp; |
|
/* flush hdp cache so updates hit vram */ |
if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) && |
!(rdev->flags & RADEON_IS_AGP)) { |
void __iomem *ptr = (void *)rdev->gart.table.vram.ptr; |
u32 tmp; |
|
/* r7xx hw bug. write to HDP_DEBUG1 followed by fb read |
* rather than write to HDP_REG_COHERENCY_FLUSH_CNTL |
* This seems to cause problems on some AGP cards. Just use the old |
* method for them. |
*/ |
WREG32(HDP_DEBUG1, 0); |
tmp = readl((void __iomem *)ptr); |
} else |
WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1); |
|
WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12); |
383,7 → 449,7 |
int r; |
|
if (rdev->gart.table.vram.robj) { |
WARN(1, "R600 PCIE GART already initialized.\n"); |
WARN(1, "R600 PCIE GART already initialized\n"); |
return 0; |
} |
/* Initialize common gart structure */ |
490,9 → 556,9 |
|
/*
 * r600_pcie_gart_fini - tear down the PCIE GART
 *
 * Releases the common GART structure, disables the GART in hardware, and
 * frees the page-table BO.  The original body called radeon_gart_fini()
 * both before and after the teardown (stale old/new lines left in place),
 * which tears the GART structure down twice; it must run exactly once.
 */
void r600_pcie_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	r600_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
}
|
void r600_agp_enable(struct radeon_device *rdev) |
591,7 → 657,7 |
WREG32(MC_VM_FB_LOCATION, tmp); |
WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8)); |
WREG32(HDP_NONSURFACE_INFO, (2 << 7)); |
WREG32(HDP_NONSURFACE_SIZE, rdev->mc.mc_vram_size | 0x3FF); |
WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF); |
if (rdev->flags & RADEON_IS_AGP) { |
WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22); |
WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22); |
631,7 → 697,7 |
* Note: GTT start, end, size should be initialized before calling this |
* function on AGP platform. |
*/ |
void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc) |
static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc) |
{ |
u64 size_bf, size_af; |
|
665,9 → 731,12 |
mc->vram_end, mc->real_vram_size >> 20); |
} else { |
u64 base = 0; |
if (rdev->flags & RADEON_IS_IGP) |
base = (RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24; |
if (rdev->flags & RADEON_IS_IGP) { |
base = RREG32(MC_VM_FB_LOCATION) & 0xFFFF; |
base <<= 24; |
} |
radeon_vram_location(rdev, &rdev->mc, base); |
rdev->mc.gtt_base_align = 0; |
radeon_gtt_location(rdev, mc); |
} |
} |
674,7 → 743,6 |
|
int r600_mc_init(struct radeon_device *rdev) |
{ |
fixed20_12 a; |
u32 tmp; |
int chansize, numchan; |
|
706,26 → 774,19 |
} |
rdev->mc.vram_width = numchan * chansize; |
/* Could aper size report 0 ? */ |
rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); |
rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); |
rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0); |
rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0); |
/* Setup GPU memory space */ |
rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); |
rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); |
rdev->mc.visible_vram_size = rdev->mc.aper_size; |
/* FIXME remove this once we support unmappable VRAM */ |
if (rdev->mc.mc_vram_size > rdev->mc.aper_size) { |
rdev->mc.mc_vram_size = rdev->mc.aper_size; |
rdev->mc.real_vram_size = rdev->mc.aper_size; |
} |
r600_vram_gtt_location(rdev, &rdev->mc); |
/* FIXME: we should enforce default clock in case GPU is not in |
* default setup |
*/ |
a.full = rfixed_const(100); |
rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk); |
rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); |
if (rdev->flags & RADEON_IS_IGP) |
|
if (rdev->flags & RADEON_IS_IGP) { |
rs690_pm_info(rdev); |
rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); |
} |
radeon_update_bandwidth_info(rdev); |
return 0; |
} |
|
752,9 → 813,11 |
S_008014_DB2_BUSY(1) | S_008014_DB3_BUSY(1) | |
S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) | |
S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1); |
u32 srbm_reset = 0; |
u32 tmp; |
|
if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE)) |
return 0; |
|
dev_info(rdev->dev, "GPU softreset \n"); |
dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n", |
RREG32(R_008010_GRBM_STATUS)); |
767,7 → 830,7 |
dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); |
} |
/* Disable CP parsing/prefetching */ |
WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(0xff)); |
WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1)); |
/* Check if any of the rendering block is busy and reset it */ |
if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) || |
(RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) { |
786,57 → 849,19 |
S_008020_SOFT_RESET_VGT(1); |
dev_info(rdev->dev, " R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp); |
WREG32(R_008020_GRBM_SOFT_RESET, tmp); |
(void)RREG32(R_008020_GRBM_SOFT_RESET); |
udelay(50); |
RREG32(R_008020_GRBM_SOFT_RESET); |
mdelay(15); |
WREG32(R_008020_GRBM_SOFT_RESET, 0); |
(void)RREG32(R_008020_GRBM_SOFT_RESET); |
} |
/* Reset CP (we always reset CP) */ |
tmp = S_008020_SOFT_RESET_CP(1); |
dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp); |
WREG32(R_008020_GRBM_SOFT_RESET, tmp); |
(void)RREG32(R_008020_GRBM_SOFT_RESET); |
udelay(50); |
RREG32(R_008020_GRBM_SOFT_RESET); |
mdelay(15); |
WREG32(R_008020_GRBM_SOFT_RESET, 0); |
(void)RREG32(R_008020_GRBM_SOFT_RESET); |
/* Reset others GPU block if necessary */ |
if (G_000E50_RLC_BUSY(RREG32(R_000E50_SRBM_STATUS))) |
srbm_reset |= S_000E60_SOFT_RESET_RLC(1); |
if (G_000E50_GRBM_RQ_PENDING(RREG32(R_000E50_SRBM_STATUS))) |
srbm_reset |= S_000E60_SOFT_RESET_GRBM(1); |
if (G_000E50_HI_RQ_PENDING(RREG32(R_000E50_SRBM_STATUS))) |
srbm_reset |= S_000E60_SOFT_RESET_IH(1); |
if (G_000E50_VMC_BUSY(RREG32(R_000E50_SRBM_STATUS))) |
srbm_reset |= S_000E60_SOFT_RESET_VMC(1); |
if (G_000E50_MCB_BUSY(RREG32(R_000E50_SRBM_STATUS))) |
srbm_reset |= S_000E60_SOFT_RESET_MC(1); |
if (G_000E50_MCDZ_BUSY(RREG32(R_000E50_SRBM_STATUS))) |
srbm_reset |= S_000E60_SOFT_RESET_MC(1); |
if (G_000E50_MCDY_BUSY(RREG32(R_000E50_SRBM_STATUS))) |
srbm_reset |= S_000E60_SOFT_RESET_MC(1); |
if (G_000E50_MCDX_BUSY(RREG32(R_000E50_SRBM_STATUS))) |
srbm_reset |= S_000E60_SOFT_RESET_MC(1); |
if (G_000E50_MCDW_BUSY(RREG32(R_000E50_SRBM_STATUS))) |
srbm_reset |= S_000E60_SOFT_RESET_MC(1); |
if (G_000E50_RLC_BUSY(RREG32(R_000E50_SRBM_STATUS))) |
srbm_reset |= S_000E60_SOFT_RESET_RLC(1); |
if (G_000E50_SEM_BUSY(RREG32(R_000E50_SRBM_STATUS))) |
srbm_reset |= S_000E60_SOFT_RESET_SEM(1); |
if (G_000E50_BIF_BUSY(RREG32(R_000E50_SRBM_STATUS))) |
srbm_reset |= S_000E60_SOFT_RESET_BIF(1); |
dev_info(rdev->dev, " R_000E60_SRBM_SOFT_RESET=0x%08X\n", srbm_reset); |
WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset); |
(void)RREG32(R_000E60_SRBM_SOFT_RESET); |
udelay(50); |
WREG32(R_000E60_SRBM_SOFT_RESET, 0); |
(void)RREG32(R_000E60_SRBM_SOFT_RESET); |
WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset); |
(void)RREG32(R_000E60_SRBM_SOFT_RESET); |
udelay(50); |
WREG32(R_000E60_SRBM_SOFT_RESET, 0); |
(void)RREG32(R_000E60_SRBM_SOFT_RESET); |
/* Wait a little for things to settle down */ |
udelay(50); |
mdelay(1); |
dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n", |
RREG32(R_008010_GRBM_STATUS)); |
dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n", |
843,16 → 868,44 |
RREG32(R_008014_GRBM_STATUS2)); |
dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n", |
RREG32(R_000E50_SRBM_STATUS)); |
/* After reset we need to reinit the asic as GPU often endup in an |
* incoherent state. |
*/ |
atom_asic_init(rdev->mode_info.atom_context); |
rv515_mc_resume(rdev, &save); |
return 0; |
} |
|
int r600_gpu_reset(struct radeon_device *rdev) |
bool r600_gpu_is_lockup(struct radeon_device *rdev) |
{ |
u32 srbm_status; |
u32 grbm_status; |
u32 grbm_status2; |
struct r100_gpu_lockup *lockup; |
int r; |
|
if (rdev->family >= CHIP_RV770) |
lockup = &rdev->config.rv770.lockup; |
else |
lockup = &rdev->config.r600.lockup; |
|
srbm_status = RREG32(R_000E50_SRBM_STATUS); |
grbm_status = RREG32(R_008010_GRBM_STATUS); |
grbm_status2 = RREG32(R_008014_GRBM_STATUS2); |
if (!G_008010_GUI_ACTIVE(grbm_status)) { |
r100_gpu_lockup_update(lockup, &rdev->cp); |
return false; |
} |
/* force CP activities */ |
r = radeon_ring_lock(rdev, 2); |
if (!r) { |
/* PACKET2 NOP */ |
radeon_ring_write(rdev, 0x80000000); |
radeon_ring_write(rdev, 0x80000000); |
radeon_ring_unlock_commit(rdev); |
} |
rdev->cp.rptr = RREG32(R600_CP_RB_RPTR); |
return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp); |
} |
|
/* ASIC reset entry point for r6xx/r7xx: currently just a GPU soft reset. */
int r600_asic_reset(struct radeon_device *rdev)
{
	return r600_gpu_soft_reset(rdev);
}
|
1095,7 → 1148,10 |
rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes; |
rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT); |
tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT); |
tiling_config |= GROUP_SIZE(0); |
tiling_config |= GROUP_SIZE((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT); |
if ((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT) |
rdev->config.r600.tiling_group_size = 512; |
else |
rdev->config.r600.tiling_group_size = 256; |
tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT; |
if (tmp > 3) { |
1122,7 → 1178,7 |
r600_count_pipe_bits((cc_rb_backend_disable & |
R6XX_MAX_BACKENDS_MASK) >> 16)), |
(cc_rb_backend_disable >> 16)); |
|
rdev->config.r600.tile_config = tiling_config; |
tiling_config |= BACKEND_MAP(backend_map); |
WREG32(GB_TILING_CONFIG, tiling_config); |
WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff); |
1131,6 → 1187,7 |
/* Setup pipes */ |
WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); |
WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); |
WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); |
|
tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8); |
WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK); |
1404,7 → 1461,9 |
*/ |
void r600_cp_stop(struct radeon_device *rdev) |
{ |
// radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); |
WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1)); |
WREG32(SCRATCH_UMSK, 0); |
} |
|
int r600_init_microcode(struct radeon_device *rdev) |
1468,10 → 1527,35 |
chip_name = "RV710"; |
rlc_chip_name = "R700"; |
break; |
case CHIP_CEDAR: |
chip_name = "CEDAR"; |
rlc_chip_name = "CEDAR"; |
break; |
case CHIP_REDWOOD: |
chip_name = "REDWOOD"; |
rlc_chip_name = "REDWOOD"; |
break; |
case CHIP_JUNIPER: |
chip_name = "JUNIPER"; |
rlc_chip_name = "JUNIPER"; |
break; |
case CHIP_CYPRESS: |
case CHIP_HEMLOCK: |
chip_name = "CYPRESS"; |
rlc_chip_name = "CYPRESS"; |
break; |
case CHIP_PALM: |
chip_name = "PALM"; |
rlc_chip_name = "SUMO"; |
break; |
default: BUG(); |
} |
|
if (rdev->family >= CHIP_RV770) { |
if (rdev->family >= CHIP_CEDAR) { |
pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4; |
me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4; |
rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4; |
} else if (rdev->family >= CHIP_RV770) { |
pfp_req_size = R700_PFP_UCODE_SIZE * 4; |
me_req_size = R700_PM4_UCODE_SIZE * 4; |
rlc_req_size = R700_RLC_UCODE_SIZE * 4; |
1545,7 → 1629,11 |
|
r600_cp_stop(rdev); |
|
WREG32(CP_RB_CNTL, RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3)); |
WREG32(CP_RB_CNTL, |
#ifdef __BIG_ENDIAN |
BUF_SWAP_32BIT | |
#endif |
RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3)); |
|
/* Reset cp */ |
WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP); |
1585,12 → 1673,12 |
} |
radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5)); |
radeon_ring_write(rdev, 0x1); |
if (rdev->family < CHIP_RV770) { |
if (rdev->family >= CHIP_RV770) { |
radeon_ring_write(rdev, 0x0); |
radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1); |
} else { |
radeon_ring_write(rdev, 0x3); |
radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1); |
} else { |
radeon_ring_write(rdev, 0x0); |
radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1); |
} |
radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1)); |
radeon_ring_write(rdev, 0); |
1616,7 → 1704,7 |
|
/* Set ring buffer size */ |
rb_bufsz = drm_order(rdev->cp.ring_size / 8); |
tmp = RB_NO_UPDATE | (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; |
tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; |
#ifdef __BIG_ENDIAN |
tmp |= BUF_SWAP_32BIT; |
#endif |
1630,8 → 1718,23 |
WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA); |
WREG32(CP_RB_RPTR_WR, 0); |
WREG32(CP_RB_WPTR, 0); |
WREG32(CP_RB_RPTR_ADDR, rdev->cp.gpu_addr & 0xFFFFFFFF); |
WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->cp.gpu_addr)); |
|
/* set the wb address whether it's enabled or not */ |
WREG32(CP_RB_RPTR_ADDR, |
#ifdef __BIG_ENDIAN |
RB_RPTR_SWAP(2) | |
#endif |
((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC)); |
WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF); |
WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF); |
|
if (rdev->wb.enabled) |
WREG32(SCRATCH_UMSK, 0xff); |
else { |
tmp |= RB_NO_UPDATE; |
WREG32(SCRATCH_UMSK, 0); |
} |
|
mdelay(1); |
WREG32(CP_RB_CNTL, tmp); |
|
1668,7 → 1771,13 |
rdev->cp.align_mask = 16 - 1; |
} |
|
/* Tear down the CP: halt the microengine, then free the ring buffer. */
void r600_cp_fini(struct radeon_device *rdev)
{
	r600_cp_stop(rdev);
	radeon_ring_fini(rdev);
}
|
|
/* |
* GPU scratch registers helpers function. |
*/ |
1677,9 → 1786,10 |
int i; |
|
rdev->scratch.num_reg = 7; |
rdev->scratch.reg_base = SCRATCH_REG0; |
for (i = 0; i < rdev->scratch.num_reg; i++) { |
rdev->scratch.free[i] = true; |
rdev->scratch.reg[i] = SCRATCH_REG0 + (i * 4); |
rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4); |
} |
} |
|
1722,13 → 1832,23 |
radeon_scratch_free(rdev, scratch); |
return r; |
} |
|
void r600_fence_ring_emit(struct radeon_device *rdev, |
struct radeon_fence *fence) |
{ |
/* Also consider EVENT_WRITE_EOP. it handles the interrupts + timestamps + events */ |
|
if (rdev->wb.use_event) { |
u64 addr = rdev->wb.gpu_addr + R600_WB_EVENT_OFFSET + |
(u64)(rdev->fence_drv.scratch_reg - rdev->scratch.reg_base); |
/* EVENT_WRITE_EOP - flush caches, send int */ |
radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE_EOP, 4)); |
radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5)); |
radeon_ring_write(rdev, addr & 0xffffffff); |
radeon_ring_write(rdev, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2)); |
radeon_ring_write(rdev, fence->seq); |
radeon_ring_write(rdev, 0); |
} else { |
radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0)); |
radeon_ring_write(rdev, CACHE_FLUSH_AND_INV_EVENT); |
radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0)); |
/* wait for 3D idle clean */ |
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1)); |
radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); |
1741,6 → 1861,9 |
radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0)); |
radeon_ring_write(rdev, RB_INT_STAT); |
} |
} |
|
|
int r600_set_surface_reg(struct radeon_device *rdev, int reg, |
uint32_t tiling_flags, uint32_t pitch, |
uint32_t offset, uint32_t obj_size) |
1754,28 → 1877,13 |
/* FIXME: implement */ |
} |
|
|
bool r600_card_posted(struct radeon_device *rdev) |
{ |
uint32_t reg; |
|
/* first check CRTCs */ |
reg = RREG32(D1CRTC_CONTROL) | |
RREG32(D2CRTC_CONTROL); |
if (reg & CRTC_EN) |
return true; |
|
/* then check MEM_SIZE, in case the crtcs are off */ |
if (RREG32(CONFIG_MEMSIZE)) |
return true; |
|
return false; |
} |
|
int r600_startup(struct radeon_device *rdev) |
{ |
int r; |
|
/* enable pcie gen2 link */ |
r600_pcie_gen2_enable(rdev); |
|
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { |
r = r600_init_microcode(rdev); |
if (r) { |
1803,8 → 1911,7 |
r = r600_cp_resume(rdev); |
if (r) |
return r; |
/* write back buffer are not vital so don't worry about failure */ |
// r600_wb_enable(rdev); |
|
return 0; |
} |
|
1836,16 → 1943,13 |
{ |
int r; |
|
r = radeon_dummy_page_init(rdev); |
if (r) |
return r; |
if (r600_debugfs_mc_info_init(rdev)) { |
DRM_ERROR("Failed to register debugfs file for mc !\n"); |
} |
/* This don't do much */ |
r = radeon_gem_init(rdev); |
if (r) |
return r; |
// r = radeon_gem_init(rdev); |
// if (r) |
// return r; |
/* Read BIOS */ |
if (!radeon_get_bios(rdev)) { |
if (ASIC_IS_AVIVO(rdev)) |
1860,7 → 1964,7 |
if (r) |
return r; |
/* Post card if necessary */ |
if (!r600_card_posted(rdev)) { |
if (!radeon_card_posted(rdev)) { |
if (!rdev->bios) { |
dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n"); |
return -EINVAL; |
1874,12 → 1978,10 |
radeon_surface_init(rdev); |
/* Initialize clocks */ |
radeon_get_clock_info(rdev->ddev); |
r = radeon_clocks_init(rdev); |
if (r) |
return r; |
/* Initialize power management */ |
radeon_pm_init(rdev); |
/* Fence driver */ |
// r = radeon_fence_driver_init(rdev); |
// if (r) |
// return r; |
if (rdev->flags & RADEON_IS_AGP) { |
r = radeon_agp_init(rdev); |
if (r) |
1932,7 → 2034,43 |
return 0; |
} |
|
/*
 * Mask off interrupt sources: display mode/grph interrupts, GRBM, DAC
 * autodetect, and all hotplug-detect lines.  CP_INT_CNTL is written with
 * only the context busy/empty bits set.  For the HPD controls, the
 * polarity bit is preserved (read-modify-write keeps DC_HPDx_INT_POLARITY /
 * DC_HOT_PLUG_DETECTx_INT_POLARITY) while the enable bits are cleared.
 * DCE3 parts use the DC_HPDx registers (DCE3.2 adds lines 5 and 6);
 * pre-DCE3 parts use DC_HOT_PLUG_DETECT1..3.
 */
static void r600_disable_interrupt_state(struct radeon_device *rdev)
{
	u32 tmp;

	WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
	WREG32(GRBM_INT_CNTL, 0);
	WREG32(DxMODE_INT_MASK, 0);
	WREG32(D1GRPH_INTERRUPT_CONTROL, 0);
	WREG32(D2GRPH_INTERRUPT_CONTROL, 0);
	if (ASIC_IS_DCE3(rdev)) {
		WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0);
		WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0);
		tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD1_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD2_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD3_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
		if (ASIC_IS_DCE32(rdev)) {
			tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
			tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
		}
	} else {
		WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
		WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
		tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
		tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
		tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
	}
}
|
|
|
1940,6 → 2078,7 |
|
|
|
|
/* |
* Debugfs info |
*/ |
2007,5 → 2146,237 |
*/ |
/*
 * Flush the HDP cache after CPU access to a BO so the writes are visible
 * to the GPU.  The @bo argument is currently unused on r6xx/r7xx — the
 * flush is global.
 */
void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
{
	/* r7xx hw bug. write to HDP_DEBUG1 followed by fb read
	 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL.
	 * This seems to cause problems on some AGP cards. Just use the old
	 * method for them.
	 */
	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
	    rdev->vram_scratch.ptr && !(rdev->flags & RADEON_IS_AGP)) {
		void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
		u32 tmp;

		WREG32(HDP_DEBUG1, 0);
		tmp = readl((void __iomem *)ptr);	/* FB read triggers the flush */
	} else
		WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
}
|
/*
 * Request a new PCIE link width (@lanes = 0/1/2/4/8/12/16; anything else
 * falls back to x16).  No-op on IGP, non-PCIE, and X2 (dual-GPU) boards.
 * Returns early if the link is already at the requested width or if
 * upconfigure is disabled.  After kicking the reconfig, busy-waits on the
 * target/current-profile register until it reads back a valid value.
 */
void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes)
{
	u32 link_width_cntl, mask, target_reg;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return;

	/* FIXME wait for idle */

	switch (lanes) {
	case 0:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
		break;
	case 1:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
		break;
	case 2:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
		break;
	case 4:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
		break;
	case 8:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
		break;
	case 12:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
		break;
	case 16:
	default:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
		break;
	}

	link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);

	/* already at the requested width? */
	if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) ==
	    (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT))
		return;

	if (link_width_cntl & R600_PCIE_LC_UPCONFIGURE_DIS)
		return;

	link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK |
			     RADEON_PCIE_LC_RECONFIG_NOW |
			     R600_PCIE_LC_RENEGOTIATE_EN |
			     R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE);
	link_width_cntl |= mask;

	WREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);

	/* some northbridges can renegotiate the link rather than requiring
	 * a complete re-config.
	 * e.g., AMD 780/790 northbridges (pci ids: 0x5956, 0x5957, 0x5958, etc.)
	 */
	if (link_width_cntl & R600_PCIE_LC_RENEGOTIATION_SUPPORT)
		link_width_cntl |= R600_PCIE_LC_RENEGOTIATE_EN | R600_PCIE_LC_UPCONFIGURE_SUPPORT;
	else
		link_width_cntl |= R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE;

	WREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl |
						       RADEON_PCIE_LC_RECONFIG_NOW));

	if (rdev->family >= CHIP_RV770)
		target_reg = R700_TARGET_AND_CURRENT_PROFILE_INDEX;
	else
		target_reg = R600_TARGET_AND_CURRENT_PROFILE_INDEX;

	/* wait for lane set to complete */
	link_width_cntl = RREG32(target_reg);
	while (link_width_cntl == 0xffffffff)
		link_width_cntl = RREG32(target_reg);

}
|
int r600_get_pcie_lanes(struct radeon_device *rdev) |
{ |
u32 link_width_cntl; |
|
if (rdev->flags & RADEON_IS_IGP) |
return 0; |
|
if (!(rdev->flags & RADEON_IS_PCIE)) |
return 0; |
|
/* x2 cards have a special sequence */ |
if (ASIC_IS_X2(rdev)) |
return 0; |
|
/* FIXME wait for idle */ |
|
link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL); |
|
switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) { |
case RADEON_PCIE_LC_LINK_WIDTH_X0: |
return 0; |
case RADEON_PCIE_LC_LINK_WIDTH_X1: |
return 1; |
case RADEON_PCIE_LC_LINK_WIDTH_X2: |
return 2; |
case RADEON_PCIE_LC_LINK_WIDTH_X4: |
return 4; |
case RADEON_PCIE_LC_LINK_WIDTH_X8: |
return 8; |
case RADEON_PCIE_LC_LINK_WIDTH_X16: |
default: |
return 16; |
} |
} |
|
/*
 * Try to bring the PCIE link up to gen2 speed.  Gated by the
 * radeon_pcie_gen2 module parameter; skipped on IGP, non-PCIE, X2 boards
 * and anything at or below the original R600.  Only proceeds when the
 * other side of the link has advertised and attempted gen2.  The 55nm
 * parts (RV670/RV620/RV635) need extra handling: upconfig advertising up
 * front, a selectable-deemphasis bail-out, and a training-control tweak.
 */
static void r600_pcie_gen2_enable(struct radeon_device *rdev)
{
	u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp;
	u16 link_cntl2;

	if (radeon_pcie_gen2 == 0)
		return;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return;

	/* only RV6xx+ chips are supported */
	if (rdev->family <= CHIP_R600)
		return;

	/* 55 nm r6xx asics */
	if ((rdev->family == CHIP_RV670) ||
	    (rdev->family == CHIP_RV620) ||
	    (rdev->family == CHIP_RV635)) {
		/* advertise upconfig capability */
		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
		link_width_cntl &= ~LC_UPCONFIGURE_DIS;
		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
		if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) {
			/* renegotiate at the current lane count */
			lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
			link_width_cntl &= ~(LC_LINK_WIDTH_MASK |
					     LC_RECONFIG_ARC_MISSING_ESCAPE);
			link_width_cntl |= lanes | LC_RECONFIG_NOW | LC_RENEGOTIATE_EN;
			WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
		} else {
			link_width_cntl |= LC_UPCONFIGURE_DIS;
			WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
		}
	}

	speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
	if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
	    (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {

		/* 55 nm r6xx asics */
		if ((rdev->family == CHIP_RV670) ||
		    (rdev->family == CHIP_RV620) ||
		    (rdev->family == CHIP_RV635)) {
			/* peek at LINK_CNTL2 via the config-space window */
			WREG32(MM_CFGREGS_CNTL, 0x8);
			link_cntl2 = RREG32(0x4088);
			WREG32(MM_CFGREGS_CNTL, 0);
			/* not supported yet */
			if (link_cntl2 & SELECTABLE_DEEMPHASIS)
				return;
		}

		speed_cntl &= ~LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK;
		speed_cntl |= (0x3 << LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT);
		speed_cntl &= ~LC_VOLTAGE_TIMER_SEL_MASK;
		speed_cntl &= ~LC_FORCE_DIS_HW_SPEED_CHANGE;
		speed_cntl |= LC_FORCE_EN_HW_SPEED_CHANGE;
		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);

		tmp = RREG32(0x541c);
		WREG32(0x541c, tmp | 0x8);
		/* set target link speed to gen2 (0x2) in LINK_CNTL2 */
		WREG32(MM_CFGREGS_CNTL, MM_WR_TO_CFG_EN);
		link_cntl2 = RREG16(0x4088);
		link_cntl2 &= ~TARGET_LINK_SPEED_MASK;
		link_cntl2 |= 0x2;
		WREG16(0x4088, link_cntl2);
		WREG32(MM_CFGREGS_CNTL, 0);

		if ((rdev->family == CHIP_RV670) ||
		    (rdev->family == CHIP_RV620) ||
		    (rdev->family == CHIP_RV635)) {
			training_cntl = RREG32_PCIE_P(PCIE_LC_TRAINING_CNTL);
			training_cntl &= ~LC_POINT_7_PLUS_EN;
			WREG32_PCIE_P(PCIE_LC_TRAINING_CNTL, training_cntl);
		} else {
			speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
			speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
			WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
		}

		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
		speed_cntl |= LC_GEN2_EN_STRAP;
		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);

	} else {
		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
		/* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
		if (1)
			link_width_cntl |= LC_UPCONFIGURE_DIS;
		else
			link_width_cntl &= ~LC_UPCONFIGURE_DIS;
		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
	}
}