26,15 → 26,18 |
* Jerome Glisse |
*/ |
#include <linux/seq_file.h> |
#include <linux/slab.h> |
#include "drmP.h" |
#include "drm.h" |
#include "radeon_drm.h" |
#include "radeon_reg.h" |
#include "radeon.h" |
#include "radeon_asic.h" |
#include "r100d.h" |
#include "rs100d.h" |
#include "rv200d.h" |
#include "rv250d.h" |
#include "atom.h" |
|
#include <linux/firmware.h> |
|
63,6 → 66,34 |
* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 |
*/ |
|
/**
 * r100_page_flip - program a new scanout base address for a page flip
 * @rdev: radeon device
 * @crtc_id: index into rdev->mode_info.crtcs of the CRTC to flip
 * @crtc_base: GPU address of the new scanout buffer (truncated to 32 bits)
 *
 * Writes the new CRTC offset with OFFSET_LOCK set, busy-waits for the
 * hardware GUI_TRIG_OFFSET ("update pending") bit to assert, then clears
 * the lock so the flip can complete during vblank.
 *
 * Returns the current GUI_TRIG_OFFSET bit, i.e. non-zero while the flip
 * is still pending in hardware.
 */
u32 r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
	/* NOTE(review): the upper 32 bits of crtc_base are discarded here */
	u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK;

	/* Lock the graphics update lock */
	/* update the scanout addresses */
	WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);

	/* Wait for update_pending to go high. */
	/* NOTE(review): unbounded busy-wait — hangs if the bit never asserts */
	while (!(RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET));
	DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");

	/* Unlock the lock, so double-buffering can take place inside vblank */
	tmp &= ~RADEON_CRTC_OFFSET__OFFSET_LOCK;
	WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);

	/* Return current update_pending status: */
	return RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET;
}
bool r100_gui_idle(struct radeon_device *rdev) |
{ |
if (RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_ACTIVE) |
return false; |
else |
return true; |
} |
|
/* hpd for digital panel detect/disconnect */ |
bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd) |
{ |
169,7 → 200,7 |
int r; |
|
if (rdev->gart.table.ram.ptr) { |
WARN(1, "R100 PCI GART already initialized.\n"); |
WARN(1, "R100 PCI GART already initialized\n"); |
return 0; |
} |
/* Initialize common gart structure */ |
233,9 → 264,9 |
|
/**
 * r100_pci_gart_fini - tear down the PCI GART
 * @rdev: radeon device
 *
 * Disables the GART, frees the RAM-backed page table and releases the
 * common gart structure.
 */
void r100_pci_gart_fini(struct radeon_device *rdev)
{
	/* NOTE(review): radeon_gart_fini() appears both before the disable and
	 * again after the table free — this looks like a merge artifact (old
	 * vs. new teardown order fused together); confirm only one call is
	 * intended. */
	radeon_gart_fini(rdev);
	r100_pci_gart_disable(rdev);
	radeon_gart_table_ram_free(rdev);
	radeon_gart_fini(rdev);
}
|
|
250,6 → 281,7 |
WREG32(R_000044_GEN_INT_STATUS, tmp); |
} |
|
#if 0 |
static inline uint32_t r100_irq_ack(struct radeon_device *rdev) |
{ |
uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS); |
257,6 → 289,12 |
RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT | |
RADEON_FP_DETECT_STAT | RADEON_FP2_DETECT_STAT; |
|
/* the interrupt works, but the status bit is permanently asserted */ |
if (rdev->irq.gui_idle && radeon_gui_idle(rdev)) { |
if (!rdev->irq.gui_idle_acked) |
irq_mask |= RADEON_GUI_IDLE_STAT; |
} |
|
if (irqs) { |
WREG32(RADEON_GEN_INT_STATUS, irqs); |
} |
263,7 → 301,9 |
return irqs & irq_mask; |
} |
|
#endif |
|
|
u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc) |
{ |
if (crtc == 0) |
299,70 → 339,7 |
} |
|
#if 0 |
/* |
* Writeback |
*/ |
/**
 * r100_wb_init - allocate and enable the scratch/rptr writeback buffer
 * @rdev: radeon device
 *
 * Creates a single GPU page in the GTT domain (if not already allocated),
 * pins and kmaps it, then programs the scratch writeback address, the CP
 * read-pointer writeback address, and enables all scratch writeback slots.
 *
 * Returns 0 on success or a negative error code from buffer-object setup.
 * NOTE(review): this function lives inside an "#if 0" block in this file,
 * so it is currently compiled out.
 */
int r100_wb_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->wb.wb_obj == NULL) {
		r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT,
				     &rdev->wb.wb_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) create WB buffer failed\n", r);
			return r;
		}
		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
		if (unlikely(r != 0))
			return r;
		r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
				  &rdev->wb.gpu_addr);
		if (r) {
			dev_err(rdev->dev, "(%d) pin WB buffer failed\n", r);
			radeon_bo_unreserve(rdev->wb.wb_obj);
			return r;
		}
		/* map so the CPU can read values the CP writes back */
		r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
		radeon_bo_unreserve(rdev->wb.wb_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) map WB buffer failed\n", r);
			return r;
		}
	}
	/* scratch values land at the start of the page, the CP rptr at
	 * +1024 bytes; ">> 2" converts the byte address to a dword address */
	WREG32(R_000774_SCRATCH_ADDR, rdev->wb.gpu_addr);
	WREG32(R_00070C_CP_RB_RPTR_ADDR,
	       S_00070C_RB_RPTR_ADDR((rdev->wb.gpu_addr + 1024) >> 2));
	WREG32(R_000770_SCRATCH_UMSK, 0xff);
	return 0;
}
|
/**
 * r100_wb_disable - disable scratch register writeback
 * @rdev: radeon device
 *
 * Clears the scratch writeback mask so no scratch register updates are
 * written to memory.
 */
void r100_wb_disable(struct radeon_device *rdev)
{
	WREG32(R_000770_SCRATCH_UMSK, 0);
}
|
void r100_wb_fini(struct radeon_device *rdev) |
{ |
int r; |
|
r100_wb_disable(rdev); |
if (rdev->wb.wb_obj) { |
r = radeon_bo_reserve(rdev->wb.wb_obj, false); |
if (unlikely(r != 0)) { |
dev_err(rdev->dev, "(%d) can't finish WB\n", r); |
return; |
} |
radeon_bo_kunmap(rdev->wb.wb_obj); |
radeon_bo_unpin(rdev->wb.wb_obj); |
radeon_bo_unreserve(rdev->wb.wb_obj); |
radeon_bo_unref(&rdev->wb.wb_obj); |
rdev->wb.wb = NULL; |
rdev->wb.wb_obj = NULL; |
} |
} |
|
int r100_copy_blit(struct radeon_device *rdev, |
uint64_t src_offset, |
uint64_t dst_offset, |
479,7 → 456,7 |
const char *fw_name = NULL; |
int err; |
|
DRM_DEBUG("\n"); |
DRM_DEBUG_KMS("\n"); |
|
pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0); |
err = IS_ERR(pdev); |
582,26 → 559,6 |
if (r100_debugfs_cp_init(rdev)) { |
DRM_ERROR("Failed to register debugfs file for CP !\n"); |
} |
/* Reset CP */ |
tmp = RREG32(RADEON_CP_CSQ_STAT); |
if ((tmp & (1 << 31))) { |
DRM_INFO("radeon: cp busy (0x%08X) resetting\n", tmp); |
WREG32(RADEON_CP_CSQ_MODE, 0); |
WREG32(RADEON_CP_CSQ_CNTL, 0); |
WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_CP); |
tmp = RREG32(RADEON_RBBM_SOFT_RESET); |
mdelay(2); |
WREG32(RADEON_RBBM_SOFT_RESET, 0); |
tmp = RREG32(RADEON_RBBM_SOFT_RESET); |
mdelay(2); |
tmp = RREG32(RADEON_CP_CSQ_STAT); |
if ((tmp & (1 << 31))) { |
DRM_INFO("radeon: cp reset failed (0x%08X)\n", tmp); |
} |
} else { |
DRM_INFO("radeon: cp idle (0x%08X)\n", tmp); |
} |
|
if (!rdev->me_fw) { |
r = r100_cp_init_microcode(rdev); |
if (r) { |
646,30 → 603,44 |
WREG32(0x718, pre_write_timer | (pre_write_limit << 28)); |
tmp = (REG_SET(RADEON_RB_BUFSZ, rb_bufsz) | |
REG_SET(RADEON_RB_BLKSZ, rb_blksz) | |
REG_SET(RADEON_MAX_FETCH, max_fetch) | |
RADEON_RB_NO_UPDATE); |
REG_SET(RADEON_MAX_FETCH, max_fetch)); |
#ifdef __BIG_ENDIAN |
tmp |= RADEON_BUF_SWAP_32BIT; |
#endif |
WREG32(RADEON_CP_RB_CNTL, tmp); |
WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_NO_UPDATE); |
|
/* Set ring address */ |
DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)rdev->cp.gpu_addr); |
WREG32(RADEON_CP_RB_BASE, rdev->cp.gpu_addr); |
/* Force read & write ptr to 0 */ |
WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA); |
WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA | RADEON_RB_NO_UPDATE); |
WREG32(RADEON_CP_RB_RPTR_WR, 0); |
WREG32(RADEON_CP_RB_WPTR, 0); |
|
/* set the wb address whether it's enabled or not */ |
WREG32(R_00070C_CP_RB_RPTR_ADDR, |
S_00070C_RB_RPTR_ADDR((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) >> 2)); |
WREG32(R_000774_SCRATCH_ADDR, rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET); |
|
if (rdev->wb.enabled) |
WREG32(R_000770_SCRATCH_UMSK, 0xff); |
else { |
tmp |= RADEON_RB_NO_UPDATE; |
WREG32(R_000770_SCRATCH_UMSK, 0); |
} |
|
WREG32(RADEON_CP_RB_CNTL, tmp); |
udelay(10); |
rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR); |
rdev->cp.wptr = RREG32(RADEON_CP_RB_WPTR); |
/* protect against crazy HW on resume */ |
rdev->cp.wptr &= rdev->cp.ptr_mask; |
/* Set cp mode to bus mastering & enable cp*/ |
WREG32(RADEON_CP_CSQ_MODE, |
REG_SET(RADEON_INDIRECT2_START, indirect2_start) | |
REG_SET(RADEON_INDIRECT1_START, indirect1_start)); |
WREG32(0x718, 0); |
WREG32(0x744, 0x00004D4D); |
WREG32(RADEON_CP_RB_WPTR_DELAY, 0); |
WREG32(RADEON_CP_CSQ_MODE, 0x00004D4D); |
WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM); |
radeon_ring_start(rdev); |
r = radeon_ring_test(rdev); |
678,6 → 649,7 |
return r; |
} |
rdev->cp.ready = true; |
// radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); |
return 0; |
} |
|
695,9 → 667,11 |
void r100_cp_disable(struct radeon_device *rdev) |
{ |
/* Disable ring */ |
// radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); |
rdev->cp.ready = false; |
WREG32(RADEON_CP_CSQ_MODE, 0); |
WREG32(RADEON_CP_CSQ_CNTL, 0); |
WREG32(R_000770_SCRATCH_UMSK, 0); |
if (r100_gui_wait_for_idle(rdev)) { |
printk(KERN_WARNING "Failed to wait GUI idle while " |
"programming pipes. Bad things might happen.\n"); |
704,39 → 678,6 |
} |
} |
|
/**
 * r100_cp_reset - soft-reset the command processor
 * @rdev: radeon device
 *
 * Stops the CP queues, pulses the CP soft-reset bit in RBBM_SOFT_RESET,
 * then polls RBBM_STATUS until bit 16 clears. If the CP was running before
 * the reset it is re-initialized with its previous ring size.
 *
 * Returns 0 on success (or the result of r100_cp_init() when the CP is
 * re-initialized), -1 if the status bit never clears within the timeout.
 */
int r100_cp_reset(struct radeon_device *rdev)
{
	uint32_t tmp;
	bool reinit_cp;
	int i;

	/* remember whether the CP must be brought back up afterwards */
	reinit_cp = rdev->cp.ready;
	rdev->cp.ready = false;
	/* stop both CP queues before asserting the soft reset */
	WREG32(RADEON_CP_CSQ_MODE, 0);
	WREG32(RADEON_CP_CSQ_CNTL, 0);
	WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_CP);
	(void)RREG32(RADEON_RBBM_SOFT_RESET);	/* read back, value discarded */
	udelay(200);
	WREG32(RADEON_RBBM_SOFT_RESET, 0);
	/* Wait to prevent race in RBBM_STATUS */
	mdelay(1);
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(RADEON_RBBM_STATUS);
		/* bit 16 clear — CP reset is considered successful */
		if (!(tmp & (1 << 16))) {
			DRM_INFO("CP reset succeed (RBBM_STATUS=0x%08X)\n",
				 tmp);
			if (reinit_cp) {
				return r100_cp_init(rdev, rdev->cp.ring_size);
			}
			return 0;
		}
		DRM_UDELAY(1);
	}
	tmp = RREG32(RADEON_RBBM_STATUS);
	DRM_ERROR("Failed to reset CP (RBBM_STATUS=0x%08X)!\n", tmp);
	return -1;
}
|
void r100_cp_commit(struct radeon_device *rdev) |
{ |
WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr); |
890,14 → 831,12 |
if (waitreloc.reg != RADEON_WAIT_UNTIL || |
waitreloc.count != 0) { |
DRM_ERROR("vline wait had illegal wait until segment\n"); |
r = -EINVAL; |
return r; |
return -EINVAL; |
} |
|
if (radeon_get_ib_value(p, waitreloc.idx + 1) != RADEON_WAIT_CRTC_VLINE) { |
DRM_ERROR("vline wait had illegal wait until\n"); |
r = -EINVAL; |
return r; |
return -EINVAL; |
} |
|
/* jump over the NOP */ |
912,12 → 851,10 |
header = radeon_get_ib_value(p, h_idx); |
crtc_id = radeon_get_ib_value(p, h_idx + 5); |
reg = CP_PACKET0_GET_REG(header); |
mutex_lock(&p->rdev->ddev->mode_config.mutex); |
obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); |
if (!obj) { |
DRM_ERROR("cannot find crtc %d\n", crtc_id); |
r = -EINVAL; |
goto out; |
return -EINVAL; |
} |
crtc = obj_to_crtc(obj); |
radeon_crtc = to_radeon_crtc(crtc); |
939,15 → 876,13 |
break; |
default: |
DRM_ERROR("unknown crtc reloc\n"); |
r = -EINVAL; |
goto out; |
return -EINVAL; |
} |
ib[h_idx] = header; |
ib[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1; |
} |
out: |
mutex_unlock(&p->rdev->ddev->mode_config.mutex); |
return r; |
|
return 0; |
} |
|
/** |
1097,6 → 1032,7 |
} |
track->zb.robj = reloc->robj; |
track->zb.offset = idx_value; |
track->zb_dirty = true; |
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
break; |
case RADEON_RB3D_COLOROFFSET: |
1109,6 → 1045,7 |
} |
track->cb[0].robj = reloc->robj; |
track->cb[0].offset = idx_value; |
track->cb_dirty = true; |
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
break; |
case RADEON_PP_TXOFFSET_0: |
1124,6 → 1061,7 |
} |
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
track->textures[i].robj = reloc->robj; |
track->tex_dirty = true; |
break; |
case RADEON_PP_CUBIC_OFFSET_T0_0: |
case RADEON_PP_CUBIC_OFFSET_T0_1: |
1141,6 → 1079,7 |
track->textures[0].cube_info[i].offset = idx_value; |
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
track->textures[0].cube_info[i].robj = reloc->robj; |
track->tex_dirty = true; |
break; |
case RADEON_PP_CUBIC_OFFSET_T1_0: |
case RADEON_PP_CUBIC_OFFSET_T1_1: |
1158,6 → 1097,7 |
track->textures[1].cube_info[i].offset = idx_value; |
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
track->textures[1].cube_info[i].robj = reloc->robj; |
track->tex_dirty = true; |
break; |
case RADEON_PP_CUBIC_OFFSET_T2_0: |
case RADEON_PP_CUBIC_OFFSET_T2_1: |
1175,9 → 1115,12 |
track->textures[2].cube_info[i].offset = idx_value; |
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
track->textures[2].cube_info[i].robj = reloc->robj; |
track->tex_dirty = true; |
break; |
case RADEON_RE_WIDTH_HEIGHT: |
track->maxy = ((idx_value >> 16) & 0x7FF); |
track->cb_dirty = true; |
track->zb_dirty = true; |
break; |
case RADEON_RB3D_COLORPITCH: |
r = r100_cs_packet_next_reloc(p, &reloc); |
1198,9 → 1141,11 |
ib[idx] = tmp; |
|
track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK; |
track->cb_dirty = true; |
break; |
case RADEON_RB3D_DEPTHPITCH: |
track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK; |
track->zb_dirty = true; |
break; |
case RADEON_RB3D_CNTL: |
switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) { |
1225,6 → 1170,8 |
return -EINVAL; |
} |
track->z_enabled = !!(idx_value & RADEON_Z_ENABLE); |
track->cb_dirty = true; |
track->zb_dirty = true; |
break; |
case RADEON_RB3D_ZSTENCILCNTL: |
switch (idx_value & 0xf) { |
1242,6 → 1189,7 |
default: |
break; |
} |
track->zb_dirty = true; |
break; |
case RADEON_RB3D_ZPASS_ADDR: |
r = r100_cs_packet_next_reloc(p, &reloc); |
1258,6 → 1206,7 |
uint32_t temp = idx_value >> 4; |
for (i = 0; i < track->num_texture; i++) |
track->textures[i].enabled = !!(temp & (1 << i)); |
track->tex_dirty = true; |
} |
break; |
case RADEON_SE_VF_CNTL: |
1272,6 → 1221,7 |
i = (reg - RADEON_PP_TEX_SIZE_0) / 8; |
track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1; |
track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1; |
track->tex_dirty = true; |
break; |
case RADEON_PP_TEX_PITCH_0: |
case RADEON_PP_TEX_PITCH_1: |
1278,6 → 1228,7 |
case RADEON_PP_TEX_PITCH_2: |
i = (reg - RADEON_PP_TEX_PITCH_0) / 8; |
track->textures[i].pitch = idx_value + 32; |
track->tex_dirty = true; |
break; |
case RADEON_PP_TXFILTER_0: |
case RADEON_PP_TXFILTER_1: |
1291,6 → 1242,7 |
tmp = (idx_value >> 27) & 0x7; |
if (tmp == 2 || tmp == 6) |
track->textures[i].roundup_h = false; |
track->tex_dirty = true; |
break; |
case RADEON_PP_TXFORMAT_0: |
case RADEON_PP_TXFORMAT_1: |
1310,6 → 1262,7 |
case RADEON_TXFORMAT_RGB332: |
case RADEON_TXFORMAT_Y8: |
track->textures[i].cpp = 1; |
track->textures[i].compress_format = R100_TRACK_COMP_NONE; |
break; |
case RADEON_TXFORMAT_AI88: |
case RADEON_TXFORMAT_ARGB1555: |
1321,6 → 1274,7 |
case RADEON_TXFORMAT_LDUDV655: |
case RADEON_TXFORMAT_DUDV88: |
track->textures[i].cpp = 2; |
track->textures[i].compress_format = R100_TRACK_COMP_NONE; |
break; |
case RADEON_TXFORMAT_ARGB8888: |
case RADEON_TXFORMAT_RGBA8888: |
1327,6 → 1281,7 |
case RADEON_TXFORMAT_SHADOW32: |
case RADEON_TXFORMAT_LDUDUV8888: |
track->textures[i].cpp = 4; |
track->textures[i].compress_format = R100_TRACK_COMP_NONE; |
break; |
case RADEON_TXFORMAT_DXT1: |
track->textures[i].cpp = 1; |
1340,6 → 1295,7 |
} |
track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf); |
track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf); |
track->tex_dirty = true; |
break; |
case RADEON_PP_CUBIC_FACES_0: |
case RADEON_PP_CUBIC_FACES_1: |
1350,6 → 1306,7 |
track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf); |
track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf); |
} |
track->tex_dirty = true; |
break; |
default: |
printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n", |
1484,6 → 1441,11 |
return r; |
break; |
/* triggers drawing using indices to vertex buffer */ |
case PACKET3_3D_CLEAR_HIZ: |
case PACKET3_3D_CLEAR_ZMASK: |
if (p->rdev->hyperz_filp != p->filp) |
return -EINVAL; |
break; |
case PACKET3_NOP: |
break; |
default: |
1652,83 → 1614,164 |
return -1; |
} |
|
void r100_gpu_init(struct radeon_device *rdev) |
void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_cp *cp) |
{ |
/* TODO: anythings to do here ? pipes ? */ |
r100_hdp_reset(rdev); |
lockup->last_cp_rptr = cp->rptr; |
lockup->last_jiffies = 0; //jiffies; |
} |
|
void r100_hdp_reset(struct radeon_device *rdev) |
/**
 * r100_gpu_cp_is_lockup() - check whether the CP is locked up
 * @rdev: radeon device structure
 * @lockup: r100_gpu_lockup structure holding CP lockup tracking information
 * @cp: radeon_cp structure holding CP information
 *
 * The lockup tracking information does not need explicit initialization:
 * either the CP rptr will differ from the recorded value, or a jiffies
 * wrap-around will be detected, and both cases force the tracking
 * information to be (re)initialized.
 *
 * A false positive is possible if we are called after a long gap and
 * last_cp_rptr happens to equal the current CP rptr; although unlikely,
 * it can happen. To avoid this, if the time elapsed since the last call
 * exceeds the timeout (10000 ms in the current code) we return false and
 * refresh the tracking information. Consequently the caller must invoke
 * r100_gpu_cp_is_lockup several times within that window for a lockup to
 * be reported; the fencing code should take this into account.
 *
 * The caller should also write to the ring to force the CP to do
 * something, so we do not report a false positive when the CP simply has
 * nothing to do.
 **/
bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_cp *cp) |
{ |
uint32_t tmp; |
unsigned long cjiffies, elapsed; |
|
tmp = RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL; |
tmp |= (7 << 28); |
WREG32(RADEON_HOST_PATH_CNTL, tmp | RADEON_HDP_SOFT_RESET | RADEON_HDP_READ_BUFFER_INVALIDATE); |
(void)RREG32(RADEON_HOST_PATH_CNTL); |
udelay(200); |
WREG32(RADEON_RBBM_SOFT_RESET, 0); |
WREG32(RADEON_HOST_PATH_CNTL, tmp); |
(void)RREG32(RADEON_HOST_PATH_CNTL); |
#if 0 |
cjiffies = jiffies; |
if (!time_after(cjiffies, lockup->last_jiffies)) { |
/* likely a wrap around */ |
lockup->last_cp_rptr = cp->rptr; |
lockup->last_jiffies = jiffies; |
return false; |
} |
if (cp->rptr != lockup->last_cp_rptr) { |
/* CP is still working no lockup */ |
lockup->last_cp_rptr = cp->rptr; |
lockup->last_jiffies = jiffies; |
return false; |
} |
elapsed = jiffies_to_msecs(cjiffies - lockup->last_jiffies); |
if (elapsed >= 10000) { |
dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed); |
return true; |
} |
#endif |
|
int r100_rb2d_reset(struct radeon_device *rdev) |
/* give a chance to the GPU ... */ |
return false; |
} |
|
bool r100_gpu_is_lockup(struct radeon_device *rdev) |
{ |
uint32_t tmp; |
int i; |
u32 rbbm_status; |
int r; |
|
WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_E2); |
(void)RREG32(RADEON_RBBM_SOFT_RESET); |
udelay(200); |
WREG32(RADEON_RBBM_SOFT_RESET, 0); |
/* Wait to prevent race in RBBM_STATUS */ |
mdelay(1); |
for (i = 0; i < rdev->usec_timeout; i++) { |
tmp = RREG32(RADEON_RBBM_STATUS); |
if (!(tmp & (1 << 26))) { |
DRM_INFO("RB2D reset succeed (RBBM_STATUS=0x%08X)\n", |
tmp); |
return 0; |
rbbm_status = RREG32(R_000E40_RBBM_STATUS); |
if (!G_000E40_GUI_ACTIVE(rbbm_status)) { |
r100_gpu_lockup_update(&rdev->config.r100.lockup, &rdev->cp); |
return false; |
} |
DRM_UDELAY(1); |
/* force CP activities */ |
r = radeon_ring_lock(rdev, 2); |
if (!r) { |
/* PACKET2 NOP */ |
radeon_ring_write(rdev, 0x80000000); |
radeon_ring_write(rdev, 0x80000000); |
radeon_ring_unlock_commit(rdev); |
} |
tmp = RREG32(RADEON_RBBM_STATUS); |
DRM_ERROR("Failed to reset RB2D (RBBM_STATUS=0x%08X)!\n", tmp); |
return -1; |
rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR); |
return r100_gpu_cp_is_lockup(rdev, &rdev->config.r100.lockup, &rdev->cp); |
} |
|
int r100_gpu_reset(struct radeon_device *rdev) |
void r100_bm_disable(struct radeon_device *rdev) |
{ |
uint32_t status; |
u32 tmp; |
|
/* reset order likely matter */ |
status = RREG32(RADEON_RBBM_STATUS); |
/* reset HDP */ |
r100_hdp_reset(rdev); |
/* reset rb2d */ |
if (status & ((1 << 17) | (1 << 18) | (1 << 27))) { |
r100_rb2d_reset(rdev); |
/* disable bus mastering */ |
tmp = RREG32(R_000030_BUS_CNTL); |
WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000044); |
mdelay(1); |
WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000042); |
mdelay(1); |
WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000040); |
tmp = RREG32(RADEON_BUS_CNTL); |
mdelay(1); |
tmp = PciRead16(rdev->pdev->bus, rdev->pdev->devfn, 0x4); |
PciWrite16(rdev->pdev->bus, rdev->pdev->devfn, 0x4, tmp & 0xFFFB); |
mdelay(1); |
} |
/* TODO: reset 3D engine */ |
|
int r100_asic_reset(struct radeon_device *rdev) |
{ |
struct r100_mc_save save; |
u32 status, tmp; |
int ret = 0; |
|
status = RREG32(R_000E40_RBBM_STATUS); |
if (!G_000E40_GUI_ACTIVE(status)) { |
return 0; |
} |
r100_mc_stop(rdev, &save); |
status = RREG32(R_000E40_RBBM_STATUS); |
dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); |
/* stop CP */ |
WREG32(RADEON_CP_CSQ_CNTL, 0); |
tmp = RREG32(RADEON_CP_RB_CNTL); |
WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA); |
WREG32(RADEON_CP_RB_RPTR_WR, 0); |
WREG32(RADEON_CP_RB_WPTR, 0); |
WREG32(RADEON_CP_RB_CNTL, tmp); |
/* save PCI state */ |
// pci_save_state(rdev->pdev); |
/* disable bus mastering */ |
r100_bm_disable(rdev); |
WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_SE(1) | |
S_0000F0_SOFT_RESET_RE(1) | |
S_0000F0_SOFT_RESET_PP(1) | |
S_0000F0_SOFT_RESET_RB(1)); |
RREG32(R_0000F0_RBBM_SOFT_RESET); |
mdelay(500); |
WREG32(R_0000F0_RBBM_SOFT_RESET, 0); |
mdelay(1); |
status = RREG32(R_000E40_RBBM_STATUS); |
dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); |
/* reset CP */ |
status = RREG32(RADEON_RBBM_STATUS); |
if (status & (1 << 16)) { |
r100_cp_reset(rdev); |
} |
WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1)); |
RREG32(R_0000F0_RBBM_SOFT_RESET); |
mdelay(500); |
WREG32(R_0000F0_RBBM_SOFT_RESET, 0); |
mdelay(1); |
status = RREG32(R_000E40_RBBM_STATUS); |
dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); |
/* restore PCI & busmastering */ |
// pci_restore_state(rdev->pdev); |
r100_enable_bm(rdev); |
/* Check if GPU is idle */ |
status = RREG32(RADEON_RBBM_STATUS); |
if (status & RADEON_RBBM_ACTIVE) { |
DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status); |
return -1; |
if (G_000E40_SE_BUSY(status) || G_000E40_RE_BUSY(status) || |
G_000E40_TAM_BUSY(status) || G_000E40_PB_BUSY(status)) { |
dev_err(rdev->dev, "failed to reset GPU\n"); |
rdev->gpu_lockup = true; |
ret = -1; |
} else |
dev_info(rdev->dev, "GPU reset succeed\n"); |
r100_mc_resume(rdev, &save); |
return ret; |
} |
DRM_INFO("GPU reset succeed (RBBM_STATUS=0x%08X)\n", status); |
return 0; |
} |
|
void r100_set_common_regs(struct radeon_device *rdev) |
{ |
struct drm_device *dev = rdev->ddev; |
bool force_dac2 = false; |
u32 tmp; |
|
/* set these so they don't interfere with anything */ |
WREG32(RADEON_OV0_SCALE_CNTL, 0); |
1800,6 → 1843,12 |
WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug); |
WREG32(RADEON_DAC_CNTL2, dac2_cntl); |
} |
|
/* switch PM block to ACPI mode */ |
tmp = RREG32_PLL(RADEON_PLL_PWRMGT_CNTL); |
tmp &= ~RADEON_PM_MODE_SEL; |
WREG32_PLL(RADEON_PLL_PWRMGT_CNTL, tmp); |
|
} |
|
/* |
1883,8 → 1932,8 |
u64 config_aper_size; |
|
/* work out accessible VRAM */ |
rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); |
rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); |
rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0); |
rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0); |
rdev->mc.visible_vram_size = r100_get_accessible_vram(rdev); |
/* FIXME we don't use the second aperture yet when we could use it */ |
if (rdev->mc.visible_vram_size > rdev->mc.aper_size) |
1909,17 → 1958,15 |
/* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM - |
* Novell bug 204882 + along with lots of ubuntu ones |
*/ |
if (rdev->mc.aper_size > config_aper_size) |
config_aper_size = rdev->mc.aper_size; |
|
if (config_aper_size > rdev->mc.real_vram_size) |
rdev->mc.mc_vram_size = config_aper_size; |
else |
rdev->mc.mc_vram_size = rdev->mc.real_vram_size; |
} |
/* FIXME remove this once we support unmappable VRAM */ |
if (rdev->mc.mc_vram_size > rdev->mc.aper_size) { |
rdev->mc.mc_vram_size = rdev->mc.aper_size; |
rdev->mc.real_vram_size = rdev->mc.aper_size; |
} |
} |
|
void r100_vga_set_state(struct radeon_device *rdev, bool state) |
{ |
1927,10 → 1974,10 |
|
temp = RREG32(RADEON_CONFIG_CNTL); |
if (state == false) { |
temp &= ~(1<<8); |
temp |= (1<<9); |
temp &= ~RADEON_CFG_VGA_RAM_EN; |
temp |= RADEON_CFG_VGA_IO_DIS; |
} else { |
temp &= ~(1<<9); |
temp &= ~RADEON_CFG_VGA_IO_DIS; |
} |
WREG32(RADEON_CONFIG_CNTL, temp); |
} |
1945,8 → 1992,10 |
if (rdev->flags & RADEON_IS_IGP) |
base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16; |
radeon_vram_location(rdev, &rdev->mc, base); |
rdev->mc.gtt_base_align = 0; |
if (!(rdev->flags & RADEON_IS_AGP)) |
radeon_gtt_location(rdev, &rdev->mc); |
radeon_update_bandwidth_info(rdev); |
} |
|
|
1955,12 → 2004,11 |
*/ |
void r100_pll_errata_after_index(struct radeon_device *rdev) |
{ |
if (!(rdev->pll_errata & CHIP_ERRATA_PLL_DUMMYREADS)) { |
return; |
} |
if (rdev->pll_errata & CHIP_ERRATA_PLL_DUMMYREADS) { |
(void)RREG32(RADEON_CLOCK_CNTL_DATA); |
(void)RREG32(RADEON_CRTC_GEN_CNTL); |
} |
} |
|
static void r100_pll_errata_after_data(struct radeon_device *rdev) |
{ |
2197,12 → 2245,6 |
int surf_index = reg * 16; |
int flags = 0; |
|
/* r100/r200 divide by 16 */ |
if (rdev->family < CHIP_R300) |
flags = pitch / 16; |
else |
flags = pitch / 8; |
|
if (rdev->family <= CHIP_RS200) { |
if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO)) |
== (RADEON_TILING_MACRO|RADEON_TILING_MICRO)) |
2226,7 → 2268,21 |
if (tiling_flags & RADEON_TILING_SWAP_32BIT) |
flags |= RADEON_SURF_AP0_SWP_32BPP | RADEON_SURF_AP1_SWP_32BPP; |
|
DRM_DEBUG("writing surface %d %d %x %x\n", reg, flags, offset, offset+obj_size-1); |
/* when we aren't tiling, the pitch seems to need to be further divided down - tested on power5 + rn50 server */
if (tiling_flags & (RADEON_TILING_SWAP_16BIT | RADEON_TILING_SWAP_32BIT)) { |
if (!(tiling_flags & (RADEON_TILING_MACRO | RADEON_TILING_MICRO))) |
if (ASIC_IS_RN50(rdev)) |
pitch /= 16; |
} |
|
/* r100/r200 divide by 16 */ |
if (rdev->family < CHIP_R300) |
flags |= pitch / 16; |
else |
flags |= pitch / 8; |
|
|
DRM_DEBUG_KMS("writing surface %d %d %x %x\n", reg, flags, offset, offset+obj_size-1); |
WREG32(RADEON_SURFACE0_INFO + surf_index, flags); |
WREG32(RADEON_SURFACE0_LOWER_BOUND + surf_index, offset); |
WREG32(RADEON_SURFACE0_UPPER_BOUND + surf_index, offset + obj_size - 1); |
2246,53 → 2302,53 |
fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff, crit_point_ff; |
uint32_t temp, data, mem_trcd, mem_trp, mem_tras; |
fixed20_12 memtcas_ff[8] = { |
fixed_init(1), |
fixed_init(2), |
fixed_init(3), |
fixed_init(0), |
fixed_init_half(1), |
fixed_init_half(2), |
fixed_init(0), |
dfixed_init(1), |
dfixed_init(2), |
dfixed_init(3), |
dfixed_init(0), |
dfixed_init_half(1), |
dfixed_init_half(2), |
dfixed_init(0), |
}; |
fixed20_12 memtcas_rs480_ff[8] = { |
fixed_init(0), |
fixed_init(1), |
fixed_init(2), |
fixed_init(3), |
fixed_init(0), |
fixed_init_half(1), |
fixed_init_half(2), |
fixed_init_half(3), |
dfixed_init(0), |
dfixed_init(1), |
dfixed_init(2), |
dfixed_init(3), |
dfixed_init(0), |
dfixed_init_half(1), |
dfixed_init_half(2), |
dfixed_init_half(3), |
}; |
fixed20_12 memtcas2_ff[8] = { |
fixed_init(0), |
fixed_init(1), |
fixed_init(2), |
fixed_init(3), |
fixed_init(4), |
fixed_init(5), |
fixed_init(6), |
fixed_init(7), |
dfixed_init(0), |
dfixed_init(1), |
dfixed_init(2), |
dfixed_init(3), |
dfixed_init(4), |
dfixed_init(5), |
dfixed_init(6), |
dfixed_init(7), |
}; |
fixed20_12 memtrbs[8] = { |
fixed_init(1), |
fixed_init_half(1), |
fixed_init(2), |
fixed_init_half(2), |
fixed_init(3), |
fixed_init_half(3), |
fixed_init(4), |
fixed_init_half(4) |
dfixed_init(1), |
dfixed_init_half(1), |
dfixed_init(2), |
dfixed_init_half(2), |
dfixed_init(3), |
dfixed_init_half(3), |
dfixed_init(4), |
dfixed_init_half(4) |
}; |
fixed20_12 memtrbs_r4xx[8] = { |
fixed_init(4), |
fixed_init(5), |
fixed_init(6), |
fixed_init(7), |
fixed_init(8), |
fixed_init(9), |
fixed_init(10), |
fixed_init(11) |
dfixed_init(4), |
dfixed_init(5), |
dfixed_init(6), |
dfixed_init(7), |
dfixed_init(8), |
dfixed_init(9), |
dfixed_init(10), |
dfixed_init(11) |
}; |
fixed20_12 min_mem_eff; |
fixed20_12 mc_latency_sclk, mc_latency_mclk, k1; |
2310,6 → 2366,8 |
uint32_t pixel_bytes1 = 0; |
uint32_t pixel_bytes2 = 0; |
|
radeon_update_display_priority(rdev); |
|
if (rdev->mode_info.crtcs[0]->base.enabled) { |
mode1 = &rdev->mode_info.crtcs[0]->base.mode; |
pixel_bytes1 = rdev->mode_info.crtcs[0]->base.fb->bits_per_pixel / 8; |
2321,7 → 2379,7 |
} |
} |
|
min_mem_eff.full = rfixed_const_8(0); |
min_mem_eff.full = dfixed_const_8(0); |
/* get modes */ |
if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) { |
uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER); |
2338,35 → 2396,32 |
/* |
* determine is there is enough bw for current mode |
*/ |
mclk_ff.full = rfixed_const(rdev->clock.default_mclk); |
temp_ff.full = rfixed_const(100); |
mclk_ff.full = rfixed_div(mclk_ff, temp_ff); |
sclk_ff.full = rfixed_const(rdev->clock.default_sclk); |
sclk_ff.full = rfixed_div(sclk_ff, temp_ff); |
sclk_ff = rdev->pm.sclk; |
mclk_ff = rdev->pm.mclk; |
|
temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1); |
temp_ff.full = rfixed_const(temp); |
mem_bw.full = rfixed_mul(mclk_ff, temp_ff); |
temp_ff.full = dfixed_const(temp); |
mem_bw.full = dfixed_mul(mclk_ff, temp_ff); |
|
pix_clk.full = 0; |
pix_clk2.full = 0; |
peak_disp_bw.full = 0; |
if (mode1) { |
temp_ff.full = rfixed_const(1000); |
pix_clk.full = rfixed_const(mode1->clock); /* convert to fixed point */ |
pix_clk.full = rfixed_div(pix_clk, temp_ff); |
temp_ff.full = rfixed_const(pixel_bytes1); |
peak_disp_bw.full += rfixed_mul(pix_clk, temp_ff); |
temp_ff.full = dfixed_const(1000); |
pix_clk.full = dfixed_const(mode1->clock); /* convert to fixed point */ |
pix_clk.full = dfixed_div(pix_clk, temp_ff); |
temp_ff.full = dfixed_const(pixel_bytes1); |
peak_disp_bw.full += dfixed_mul(pix_clk, temp_ff); |
} |
if (mode2) { |
temp_ff.full = rfixed_const(1000); |
pix_clk2.full = rfixed_const(mode2->clock); /* convert to fixed point */ |
pix_clk2.full = rfixed_div(pix_clk2, temp_ff); |
temp_ff.full = rfixed_const(pixel_bytes2); |
peak_disp_bw.full += rfixed_mul(pix_clk2, temp_ff); |
temp_ff.full = dfixed_const(1000); |
pix_clk2.full = dfixed_const(mode2->clock); /* convert to fixed point */ |
pix_clk2.full = dfixed_div(pix_clk2, temp_ff); |
temp_ff.full = dfixed_const(pixel_bytes2); |
peak_disp_bw.full += dfixed_mul(pix_clk2, temp_ff); |
} |
|
mem_bw.full = rfixed_mul(mem_bw, min_mem_eff); |
mem_bw.full = dfixed_mul(mem_bw, min_mem_eff); |
if (peak_disp_bw.full >= mem_bw.full) { |
DRM_ERROR("You may not have enough display bandwidth for current mode\n" |
"If you have flickering problem, try to lower resolution, refresh rate, or color depth\n"); |
2408,9 → 2463,9 |
mem_tras = ((temp >> 12) & 0xf) + 4; |
} |
/* convert to FF */ |
trcd_ff.full = rfixed_const(mem_trcd); |
trp_ff.full = rfixed_const(mem_trp); |
tras_ff.full = rfixed_const(mem_tras); |
trcd_ff.full = dfixed_const(mem_trcd); |
trp_ff.full = dfixed_const(mem_trp); |
tras_ff.full = dfixed_const(mem_tras); |
|
/* Get values from the MEM_SDRAM_MODE_REG register...converting its */ |
temp = RREG32(RADEON_MEM_SDRAM_MODE_REG); |
2428,7 → 2483,7 |
/* extra cas latency stored in bits 23-25 0-4 clocks */ |
data = (temp >> 23) & 0x7; |
if (data < 5) |
tcas_ff.full += rfixed_const(data); |
tcas_ff.full += dfixed_const(data); |
} |
|
if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) { |
2465,72 → 2520,72 |
|
if (rdev->flags & RADEON_IS_AGP) { |
fixed20_12 agpmode_ff; |
agpmode_ff.full = rfixed_const(radeon_agpmode); |
temp_ff.full = rfixed_const_666(16); |
sclk_eff_ff.full -= rfixed_mul(agpmode_ff, temp_ff); |
agpmode_ff.full = dfixed_const(radeon_agpmode); |
temp_ff.full = dfixed_const_666(16); |
sclk_eff_ff.full -= dfixed_mul(agpmode_ff, temp_ff); |
} |
/* TODO PCIE lanes may affect this - agpmode == 16?? */ |
|
if (ASIC_IS_R300(rdev)) { |
sclk_delay_ff.full = rfixed_const(250); |
sclk_delay_ff.full = dfixed_const(250); |
} else { |
if ((rdev->family == CHIP_RV100) || |
rdev->flags & RADEON_IS_IGP) { |
if (rdev->mc.vram_is_ddr) |
sclk_delay_ff.full = rfixed_const(41); |
sclk_delay_ff.full = dfixed_const(41); |
else |
sclk_delay_ff.full = rfixed_const(33); |
sclk_delay_ff.full = dfixed_const(33); |
} else { |
if (rdev->mc.vram_width == 128) |
sclk_delay_ff.full = rfixed_const(57); |
sclk_delay_ff.full = dfixed_const(57); |
else |
sclk_delay_ff.full = rfixed_const(41); |
sclk_delay_ff.full = dfixed_const(41); |
} |
} |
|
mc_latency_sclk.full = rfixed_div(sclk_delay_ff, sclk_eff_ff); |
mc_latency_sclk.full = dfixed_div(sclk_delay_ff, sclk_eff_ff); |
|
if (rdev->mc.vram_is_ddr) { |
if (rdev->mc.vram_width == 32) { |
k1.full = rfixed_const(40); |
k1.full = dfixed_const(40); |
c = 3; |
} else { |
k1.full = rfixed_const(20); |
k1.full = dfixed_const(20); |
c = 1; |
} |
} else { |
k1.full = rfixed_const(40); |
k1.full = dfixed_const(40); |
c = 3; |
} |
|
temp_ff.full = rfixed_const(2); |
mc_latency_mclk.full = rfixed_mul(trcd_ff, temp_ff); |
temp_ff.full = rfixed_const(c); |
mc_latency_mclk.full += rfixed_mul(tcas_ff, temp_ff); |
temp_ff.full = rfixed_const(4); |
mc_latency_mclk.full += rfixed_mul(tras_ff, temp_ff); |
mc_latency_mclk.full += rfixed_mul(trp_ff, temp_ff); |
temp_ff.full = dfixed_const(2); |
mc_latency_mclk.full = dfixed_mul(trcd_ff, temp_ff); |
temp_ff.full = dfixed_const(c); |
mc_latency_mclk.full += dfixed_mul(tcas_ff, temp_ff); |
temp_ff.full = dfixed_const(4); |
mc_latency_mclk.full += dfixed_mul(tras_ff, temp_ff); |
mc_latency_mclk.full += dfixed_mul(trp_ff, temp_ff); |
mc_latency_mclk.full += k1.full; |
|
mc_latency_mclk.full = rfixed_div(mc_latency_mclk, mclk_ff); |
mc_latency_mclk.full += rfixed_div(temp_ff, sclk_eff_ff); |
mc_latency_mclk.full = dfixed_div(mc_latency_mclk, mclk_ff); |
mc_latency_mclk.full += dfixed_div(temp_ff, sclk_eff_ff); |
|
/* |
HW cursor time assuming worst case of full size colour cursor. |
*/ |
temp_ff.full = rfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1)))); |
temp_ff.full = dfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1)))); |
temp_ff.full += trcd_ff.full; |
if (temp_ff.full < tras_ff.full) |
temp_ff.full = tras_ff.full; |
cur_latency_mclk.full = rfixed_div(temp_ff, mclk_ff); |
cur_latency_mclk.full = dfixed_div(temp_ff, mclk_ff); |
|
temp_ff.full = rfixed_const(cur_size); |
cur_latency_sclk.full = rfixed_div(temp_ff, sclk_eff_ff); |
temp_ff.full = dfixed_const(cur_size); |
cur_latency_sclk.full = dfixed_div(temp_ff, sclk_eff_ff); |
/* |
Find the total latency for the display data. |
*/ |
disp_latency_overhead.full = rfixed_const(8); |
disp_latency_overhead.full = rfixed_div(disp_latency_overhead, sclk_ff); |
disp_latency_overhead.full = dfixed_const(8); |
disp_latency_overhead.full = dfixed_div(disp_latency_overhead, sclk_ff); |
mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full; |
mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full; |
|
2558,16 → 2613,16 |
/* |
Find the drain rate of the display buffer. |
*/ |
temp_ff.full = rfixed_const((16/pixel_bytes1)); |
disp_drain_rate.full = rfixed_div(pix_clk, temp_ff); |
temp_ff.full = dfixed_const((16/pixel_bytes1)); |
disp_drain_rate.full = dfixed_div(pix_clk, temp_ff); |
|
/* |
Find the critical point of the display buffer. |
*/ |
crit_point_ff.full = rfixed_mul(disp_drain_rate, disp_latency); |
crit_point_ff.full += rfixed_const_half(0); |
crit_point_ff.full = dfixed_mul(disp_drain_rate, disp_latency); |
crit_point_ff.full += dfixed_const_half(0); |
|
critical_point = rfixed_trunc(crit_point_ff); |
critical_point = dfixed_trunc(crit_point_ff); |
|
if (rdev->disp_priority == 2) { |
critical_point = 0; |
2623,7 → 2678,7 |
} |
#endif |
|
DRM_DEBUG("GRPH_BUFFER_CNTL from to %x\n", |
DRM_DEBUG_KMS("GRPH_BUFFER_CNTL from to %x\n", |
/* (unsigned int)info->SavedReg->grph_buffer_cntl, */ |
(unsigned int)RREG32(RADEON_GRPH_BUFFER_CNTL)); |
} |
2638,8 → 2693,8 |
/* |
Find the drain rate of the display buffer. |
*/ |
temp_ff.full = rfixed_const((16/pixel_bytes2)); |
disp_drain_rate2.full = rfixed_div(pix_clk2, temp_ff); |
temp_ff.full = dfixed_const((16/pixel_bytes2)); |
disp_drain_rate2.full = dfixed_div(pix_clk2, temp_ff); |
|
grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL); |
grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK); |
2660,8 → 2715,8 |
critical_point2 = 0; |
else { |
temp = (rdev->mc.vram_width * rdev->mc.vram_is_ddr + 1)/128; |
temp_ff.full = rfixed_const(temp); |
temp_ff.full = rfixed_mul(mclk_ff, temp_ff); |
temp_ff.full = dfixed_const(temp); |
temp_ff.full = dfixed_mul(mclk_ff, temp_ff); |
if (sclk_ff.full < temp_ff.full) |
temp_ff.full = sclk_ff.full; |
|
2669,15 → 2724,15 |
|
if (mode1) { |
temp_ff.full = read_return_rate.full - disp_drain_rate.full; |
time_disp1_drop_priority.full = rfixed_div(crit_point_ff, temp_ff); |
time_disp1_drop_priority.full = dfixed_div(crit_point_ff, temp_ff); |
} else { |
time_disp1_drop_priority.full = 0; |
} |
crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full; |
crit_point_ff.full = rfixed_mul(crit_point_ff, disp_drain_rate2); |
crit_point_ff.full += rfixed_const_half(0); |
crit_point_ff.full = dfixed_mul(crit_point_ff, disp_drain_rate2); |
crit_point_ff.full += dfixed_const_half(0); |
|
critical_point2 = rfixed_trunc(crit_point_ff); |
critical_point2 = dfixed_trunc(crit_point_ff); |
|
if (rdev->disp_priority == 2) { |
critical_point2 = 0; |
2719,13 → 2774,387 |
WREG32(RS400_DISP1_REQ_CNTL1, 0x28FBC3AC); |
} |
|
DRM_DEBUG("GRPH2_BUFFER_CNTL from to %x\n", |
DRM_DEBUG_KMS("GRPH2_BUFFER_CNTL from to %x\n", |
(unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL)); |
} |
} |
|
#if 0 |
/*
 * Dump the complete state of one CS texture tracking slot to the kernel
 * log.  Debug aid invoked when a texture fails validation; everything
 * goes through DRM_ERROR so it shows up without extra debug flags.
 */
static inline void r100_cs_track_texture_print(struct r100_cs_track_texture *t)
{
	DRM_ERROR("pitch %d\n", t->pitch);
	DRM_ERROR("use_pitch %d\n", t->use_pitch);
	DRM_ERROR("width %d\n", t->width);
	DRM_ERROR("width_11 %d\n", t->width_11);
	DRM_ERROR("height %d\n", t->height);
	DRM_ERROR("height_11 %d\n", t->height_11);
	DRM_ERROR("num levels %d\n", t->num_levels);
	DRM_ERROR("depth %d\n", t->txdepth);
	DRM_ERROR("bpp %d\n", t->cpp);
	DRM_ERROR("coordinate type %d\n", t->tex_coord_type);
	DRM_ERROR("width round to power of 2 %d\n", t->roundup_w);
	DRM_ERROR("height round to power of 2 %d\n", t->roundup_h);
	DRM_ERROR("compress format %d\n", t->compress_format);
}
|
static int r100_track_compress_size(int compress_format, int w, int h) |
{ |
int block_width, block_height, block_bytes; |
int wblocks, hblocks; |
int min_wblocks; |
int sz; |
|
block_width = 4; |
block_height = 4; |
|
switch (compress_format) { |
case R100_TRACK_COMP_DXT1: |
block_bytes = 8; |
min_wblocks = 4; |
break; |
default: |
case R100_TRACK_COMP_DXT35: |
block_bytes = 16; |
min_wblocks = 2; |
break; |
} |
|
hblocks = (h + block_height - 1) / block_height; |
wblocks = (w + block_width - 1) / block_width; |
if (wblocks < min_wblocks) |
wblocks = min_wblocks; |
sz = wblocks * hblocks * block_bytes; |
return sz; |
} |
|
static int r100_cs_track_cube(struct radeon_device *rdev, |
struct r100_cs_track *track, unsigned idx) |
{ |
unsigned face, w, h; |
struct radeon_bo *cube_robj; |
unsigned long size; |
unsigned compress_format = track->textures[idx].compress_format; |
|
for (face = 0; face < 5; face++) { |
cube_robj = track->textures[idx].cube_info[face].robj; |
w = track->textures[idx].cube_info[face].width; |
h = track->textures[idx].cube_info[face].height; |
|
if (compress_format) { |
size = r100_track_compress_size(compress_format, w, h); |
} else |
size = w * h; |
size *= track->textures[idx].cpp; |
|
size += track->textures[idx].cube_info[face].offset; |
|
if (size > radeon_bo_size(cube_robj)) { |
DRM_ERROR("Cube texture offset greater than object size %lu %lu\n", |
size, radeon_bo_size(cube_robj)); |
r100_cs_track_texture_print(&track->textures[idx]); |
return -1; |
} |
} |
return 0; |
} |
|
/*
 * Validate every enabled texture unit against the size of its backing
 * buffer object.  For each unit the total footprint is accumulated over
 * all mipmap levels - taking pitch vs. width addressing, the RV515+
 * extra width/height bit, power-of-two round-up, 3D depth and DXT
 * compression into account - and compared to the BO size.  Cube maps
 * are checked per-face on parts with separate cube state, otherwise
 * approximated as six times the base size.
 *
 * Returns 0 on success, -EINVAL when a texture would read outside its
 * buffer object.
 */
static int r100_cs_track_texture_check(struct radeon_device *rdev,
				       struct r100_cs_track *track)
{
	struct radeon_bo *robj;
	unsigned long size;
	unsigned u, i, w, h, d;
	int ret;

	for (u = 0; u < track->num_texture; u++) {
		if (!track->textures[u].enabled)
			continue;
		if (track->textures[u].lookup_disable)
			continue;
		robj = track->textures[u].robj;
		if (robj == NULL) {
			DRM_ERROR("No texture bound to unit %u\n", u);
			return -EINVAL;
		}
		size = 0;
		/* num_levels is the last level index, hence the <= bound. */
		for (i = 0; i <= track->textures[u].num_levels; i++) {
			if (track->textures[u].use_pitch) {
				/* pre-R300 pitch is in bytes, later families
				 * program it in texels */
				if (rdev->family < CHIP_R300)
					w = (track->textures[u].pitch / track->textures[u].cpp) / (1 << i);
				else
					w = track->textures[u].pitch / (1 << i);
			} else {
				w = track->textures[u].width;
				/* RV515+ carry an 11th size bit separately */
				if (rdev->family >= CHIP_RV515)
					w |= track->textures[u].width_11;
				w = w / (1 << i);
				if (track->textures[u].roundup_w)
					w = roundup_pow_of_two(w);
			}
			h = track->textures[u].height;
			if (rdev->family >= CHIP_RV515)
				h |= track->textures[u].height_11;
			h = h / (1 << i);
			if (track->textures[u].roundup_h)
				h = roundup_pow_of_two(h);
			if (track->textures[u].tex_coord_type == 1) {
				/* 3D texture: depth shrinks with the mip level
				 * but never below one slice */
				d = (1 << track->textures[u].txdepth) / (1 << i);
				if (!d)
					d = 1;
			} else {
				d = 1;
			}
			if (track->textures[u].compress_format) {

				size += r100_track_compress_size(track->textures[u].compress_format, w, h) * d;
				/* compressed textures are block based */
			} else
				size += w * h * d;
		}
		size *= track->textures[u].cpp;

		switch (track->textures[u].tex_coord_type) {
		case 0:
		case 1:
			break;
		case 2:
			/* cube map: either check each face's BO, or scale
			 * the shared BO footprint by the six faces */
			if (track->separate_cube) {
				ret = r100_cs_track_cube(rdev, track, u);
				if (ret)
					return ret;
			} else
				size *= 6;
			break;
		default:
			DRM_ERROR("Invalid texture coordinate type %u for unit "
				  "%u\n", track->textures[u].tex_coord_type, u);
			return -EINVAL;
		}
		if (size > radeon_bo_size(robj)) {
			DRM_ERROR("Texture of unit %u needs %lu bytes but is "
				  "%lu\n", u, size, radeon_bo_size(robj));
			r100_cs_track_texture_print(&track->textures[u]);
			return -EINVAL;
		}
	}
	return 0;
}
|
int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track) |
{ |
unsigned i; |
unsigned long size; |
unsigned prim_walk; |
unsigned nverts; |
unsigned num_cb = track->cb_dirty ? track->num_cb : 0; |
|
if (num_cb && !track->zb_cb_clear && !track->color_channel_mask && |
!track->blend_read_enable) |
num_cb = 0; |
|
for (i = 0; i < num_cb; i++) { |
if (track->cb[i].robj == NULL) { |
DRM_ERROR("[drm] No buffer for color buffer %d !\n", i); |
return -EINVAL; |
} |
size = track->cb[i].pitch * track->cb[i].cpp * track->maxy; |
size += track->cb[i].offset; |
if (size > radeon_bo_size(track->cb[i].robj)) { |
DRM_ERROR("[drm] Buffer too small for color buffer %d " |
"(need %lu have %lu) !\n", i, size, |
radeon_bo_size(track->cb[i].robj)); |
DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n", |
i, track->cb[i].pitch, track->cb[i].cpp, |
track->cb[i].offset, track->maxy); |
return -EINVAL; |
} |
} |
track->cb_dirty = false; |
|
if (track->zb_dirty && track->z_enabled) { |
if (track->zb.robj == NULL) { |
DRM_ERROR("[drm] No buffer for z buffer !\n"); |
return -EINVAL; |
} |
size = track->zb.pitch * track->zb.cpp * track->maxy; |
size += track->zb.offset; |
if (size > radeon_bo_size(track->zb.robj)) { |
DRM_ERROR("[drm] Buffer too small for z buffer " |
"(need %lu have %lu) !\n", size, |
radeon_bo_size(track->zb.robj)); |
DRM_ERROR("[drm] zbuffer (%u %u %u %u)\n", |
track->zb.pitch, track->zb.cpp, |
track->zb.offset, track->maxy); |
return -EINVAL; |
} |
} |
track->zb_dirty = false; |
|
if (track->aa_dirty && track->aaresolve) { |
if (track->aa.robj == NULL) { |
DRM_ERROR("[drm] No buffer for AA resolve buffer %d !\n", i); |
return -EINVAL; |
} |
/* I believe the format comes from colorbuffer0. */ |
size = track->aa.pitch * track->cb[0].cpp * track->maxy; |
size += track->aa.offset; |
if (size > radeon_bo_size(track->aa.robj)) { |
DRM_ERROR("[drm] Buffer too small for AA resolve buffer %d " |
"(need %lu have %lu) !\n", i, size, |
radeon_bo_size(track->aa.robj)); |
DRM_ERROR("[drm] AA resolve buffer %d (%u %u %u %u)\n", |
i, track->aa.pitch, track->cb[0].cpp, |
track->aa.offset, track->maxy); |
return -EINVAL; |
} |
} |
track->aa_dirty = false; |
|
prim_walk = (track->vap_vf_cntl >> 4) & 0x3; |
if (track->vap_vf_cntl & (1 << 14)) { |
nverts = track->vap_alt_nverts; |
} else { |
nverts = (track->vap_vf_cntl >> 16) & 0xFFFF; |
} |
switch (prim_walk) { |
case 1: |
for (i = 0; i < track->num_arrays; i++) { |
size = track->arrays[i].esize * track->max_indx * 4; |
if (track->arrays[i].robj == NULL) { |
DRM_ERROR("(PW %u) Vertex array %u no buffer " |
"bound\n", prim_walk, i); |
return -EINVAL; |
} |
if (size > radeon_bo_size(track->arrays[i].robj)) { |
dev_err(rdev->dev, "(PW %u) Vertex array %u " |
"need %lu dwords have %lu dwords\n", |
prim_walk, i, size >> 2, |
radeon_bo_size(track->arrays[i].robj) |
>> 2); |
DRM_ERROR("Max indices %u\n", track->max_indx); |
return -EINVAL; |
} |
} |
break; |
case 2: |
for (i = 0; i < track->num_arrays; i++) { |
size = track->arrays[i].esize * (nverts - 1) * 4; |
if (track->arrays[i].robj == NULL) { |
DRM_ERROR("(PW %u) Vertex array %u no buffer " |
"bound\n", prim_walk, i); |
return -EINVAL; |
} |
if (size > radeon_bo_size(track->arrays[i].robj)) { |
dev_err(rdev->dev, "(PW %u) Vertex array %u " |
"need %lu dwords have %lu dwords\n", |
prim_walk, i, size >> 2, |
radeon_bo_size(track->arrays[i].robj) |
>> 2); |
return -EINVAL; |
} |
} |
break; |
case 3: |
size = track->vtx_size * nverts; |
if (size != track->immd_dwords) { |
DRM_ERROR("IMMD draw %u dwors but needs %lu dwords\n", |
track->immd_dwords, size); |
DRM_ERROR("VAP_VF_CNTL.NUM_VERTICES %u, VTX_SIZE %u\n", |
nverts, track->vtx_size); |
return -EINVAL; |
} |
break; |
default: |
DRM_ERROR("[drm] Invalid primitive walk %d for VAP_VF_CNTL\n", |
prim_walk); |
return -EINVAL; |
} |
|
if (track->tex_dirty) { |
track->tex_dirty = false; |
return r100_cs_track_texture_check(rdev, track); |
} |
return 0; |
} |
|
void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track) |
{ |
unsigned i, face; |
|
track->cb_dirty = true; |
track->zb_dirty = true; |
track->tex_dirty = true; |
track->aa_dirty = true; |
|
if (rdev->family < CHIP_R300) { |
track->num_cb = 1; |
if (rdev->family <= CHIP_RS200) |
track->num_texture = 3; |
else |
track->num_texture = 6; |
track->maxy = 2048; |
track->separate_cube = 1; |
} else { |
track->num_cb = 4; |
track->num_texture = 16; |
track->maxy = 4096; |
track->separate_cube = 0; |
track->aaresolve = false; |
track->aa.robj = NULL; |
} |
|
for (i = 0; i < track->num_cb; i++) { |
track->cb[i].robj = NULL; |
track->cb[i].pitch = 8192; |
track->cb[i].cpp = 16; |
track->cb[i].offset = 0; |
} |
track->z_enabled = true; |
track->zb.robj = NULL; |
track->zb.pitch = 8192; |
track->zb.cpp = 4; |
track->zb.offset = 0; |
track->vtx_size = 0x7F; |
track->immd_dwords = 0xFFFFFFFFUL; |
track->num_arrays = 11; |
track->max_indx = 0x00FFFFFFUL; |
for (i = 0; i < track->num_arrays; i++) { |
track->arrays[i].robj = NULL; |
track->arrays[i].esize = 0x7F; |
} |
for (i = 0; i < track->num_texture; i++) { |
track->textures[i].compress_format = R100_TRACK_COMP_NONE; |
track->textures[i].pitch = 16536; |
track->textures[i].width = 16536; |
track->textures[i].height = 16536; |
track->textures[i].width_11 = 1 << 11; |
track->textures[i].height_11 = 1 << 11; |
track->textures[i].num_levels = 12; |
if (rdev->family <= CHIP_RS200) { |
track->textures[i].tex_coord_type = 0; |
track->textures[i].txdepth = 0; |
} else { |
track->textures[i].txdepth = 16; |
track->textures[i].tex_coord_type = 1; |
} |
track->textures[i].cpp = 64; |
track->textures[i].robj = NULL; |
/* CS IB emission code makes sure texture unit are disabled */ |
track->textures[i].enabled = false; |
track->textures[i].lookup_disable = false; |
track->textures[i].roundup_w = true; |
track->textures[i].roundup_h = true; |
if (track->separate_cube) |
for (face = 0; face < 5; face++) { |
track->textures[i].cube_info[face].robj = NULL; |
track->textures[i].cube_info[face].width = 16536; |
track->textures[i].cube_info[face].height = 16536; |
track->textures[i].cube_info[face].offset = 0; |
} |
} |
} |
#endif |
|
int r100_ring_test(struct radeon_device *rdev) |
{ |
uint32_t scratch; |
2758,7 → 3187,7 |
if (i < rdev->usec_timeout) { |
DRM_INFO("ring test succeeded in %d usecs\n", i); |
} else { |
DRM_ERROR("radeon: ring test failed (sracth(0x%04X)=0x%08X)\n", |
DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n", |
scratch, tmp); |
r = -EINVAL; |
} |
2766,6 → 3195,96 |
return r; |
} |
|
#if 0 |
|
/*
 * Emit an indirect buffer on the CP ring: write the IB's GPU address
 * and its length in dwords to CP_IB_BASE, which makes the CP fetch and
 * execute the buffer.  Caller is responsible for having reserved ring
 * space.
 */
void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	radeon_ring_write(rdev, PACKET0(RADEON_CP_IB_BASE, 1));
	radeon_ring_write(rdev, ib->gpu_addr);
	radeon_ring_write(rdev, ib->length_dw);
}
|
int r100_ib_test(struct radeon_device *rdev) |
{ |
struct radeon_ib *ib; |
uint32_t scratch; |
uint32_t tmp = 0; |
unsigned i; |
int r; |
|
r = radeon_scratch_get(rdev, &scratch); |
if (r) { |
DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r); |
return r; |
} |
WREG32(scratch, 0xCAFEDEAD); |
r = radeon_ib_get(rdev, &ib); |
if (r) { |
return r; |
} |
ib->ptr[0] = PACKET0(scratch, 0); |
ib->ptr[1] = 0xDEADBEEF; |
ib->ptr[2] = PACKET2(0); |
ib->ptr[3] = PACKET2(0); |
ib->ptr[4] = PACKET2(0); |
ib->ptr[5] = PACKET2(0); |
ib->ptr[6] = PACKET2(0); |
ib->ptr[7] = PACKET2(0); |
ib->length_dw = 8; |
r = radeon_ib_schedule(rdev, ib); |
if (r) { |
radeon_scratch_free(rdev, scratch); |
radeon_ib_free(rdev, &ib); |
return r; |
} |
r = radeon_fence_wait(ib->fence, false); |
if (r) { |
return r; |
} |
for (i = 0; i < rdev->usec_timeout; i++) { |
tmp = RREG32(scratch); |
if (tmp == 0xDEADBEEF) { |
break; |
} |
DRM_UDELAY(1); |
} |
if (i < rdev->usec_timeout) { |
DRM_INFO("ib test succeeded in %u usecs\n", i); |
} else { |
DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n", |
scratch, tmp); |
r = -EINVAL; |
} |
radeon_scratch_free(rdev, scratch); |
radeon_ib_free(rdev, &ib); |
return r; |
} |
|
/*
 * Tear down the indirect-buffer pool.  Thin r100 wrapper around the
 * common IB pool teardown.
 */
void r100_ib_fini(struct radeon_device *rdev)
{
	radeon_ib_pool_fini(rdev);
}
|
int r100_ib_init(struct radeon_device *rdev) |
{ |
int r; |
|
r = radeon_ib_pool_init(rdev); |
if (r) { |
dev_err(rdev->dev, "failed initializing IB pool (%d).\n", r); |
r100_ib_fini(rdev); |
return r; |
} |
r = r100_ib_test(rdev); |
if (r) { |
dev_err(rdev->dev, "failed testing IB (%d).\n", r); |
r100_ib_fini(rdev); |
return r; |
} |
return 0; |
} |
#endif |
|
void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save) |
{ |
/* Shutdown CP we shouldn't need to do that but better be safe than |
2909,8 → 3428,6 |
r100_mc_program(rdev); |
/* Resume clock */ |
r100_clock_startup(rdev); |
/* Initialize GPU configuration (# pipes, ...) */ |
r100_gpu_init(rdev); |
/* Initialize GART (initialize after TTM so we can allocate |
* memory through TTM but finalize after TTM) */ |
r100_enable_bm(rdev); |
2925,12 → 3442,9 |
/* 1M ring buffer */ |
r = r100_cp_init(rdev, 1024 * 1024); |
if (r) { |
dev_err(rdev->dev, "failled initializing CP (%d).\n", r); |
dev_err(rdev->dev, "failed initializing CP (%d).\n", r); |
return r; |
} |
// r = r100_wb_init(rdev); |
// if (r) |
// dev_err(rdev->dev, "failled initializing WB (%d).\n", r); |
// r = r100_ib_init(rdev); |
// if (r) { |
// dev_err(rdev->dev, "failled initializing IB (%d).\n", r); |
2939,7 → 3453,30 |
return 0; |
} |
|
/* |
* Due to how kexec works, it can leave the hw fully initialised when it |
* boots the new kernel. However doing our init sequence with the CP and |
* WB stuff setup causes GPU hangs on the RN50 at least. So at startup |
* do some quick sanity checks and restore sane values to avoid this |
* problem. |
*/ |
void r100_restore_sanity(struct radeon_device *rdev) |
{ |
u32 tmp; |
|
tmp = RREG32(RADEON_CP_CSQ_CNTL); |
if (tmp) { |
WREG32(RADEON_CP_CSQ_CNTL, 0); |
} |
tmp = RREG32(RADEON_CP_RB_CNTL); |
if (tmp) { |
WREG32(RADEON_CP_RB_CNTL, 0); |
} |
tmp = RREG32(RADEON_SCRATCH_UMSK); |
if (tmp) { |
WREG32(RADEON_SCRATCH_UMSK, 0); |
} |
} |
|
int r100_init(struct radeon_device *rdev) |
{ |
2953,6 → 3490,8 |
radeon_scratch_init(rdev); |
/* Initialize surface registers */ |
radeon_surface_init(rdev); |
/* sanity check some register to avoid hangs like after kexec */ |
r100_restore_sanity(rdev); |
/* TODO: disable VGA need to use VGA request */ |
/* BIOS*/ |
if (!radeon_get_bios(rdev)) { |
2968,7 → 3507,7 |
return r; |
} |
/* Reset gpu before posting otherwise ATOM will enter infinite loop */ |
if (radeon_gpu_reset(rdev)) { |
if (radeon_asic_reset(rdev)) { |
dev_warn(rdev->dev, |
"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", |
RREG32(R_000E40_RBBM_STATUS), |
2981,8 → 3520,6 |
r100_errata(rdev); |
/* Initialize clocks */ |
radeon_get_clock_info(rdev->ddev); |
/* Initialize power management */ |
radeon_pm_init(rdev); |
/* initialize AGP */ |
if (rdev->flags & RADEON_IS_AGP) { |
r = radeon_agp_init(rdev); |
3019,7 → 3556,6 |
// r100_ib_fini(rdev); |
if (rdev->flags & RADEON_IS_PCI) |
r100_pci_gart_fini(rdev); |
// radeon_irq_kms_fini(rdev); |
rdev->accel_working = false; |
} |
return 0; |