25,7 → 25,7 |
* Alex Deucher |
* Jerome Glisse |
*/ |
//#include <linux/seq_file.h> |
#include <linux/seq_file.h> |
#include "drmP.h" |
#include "drm.h" |
#include "radeon_drm.h" |
32,12 → 32,16 |
#include "radeon_microcode.h" |
#include "radeon_reg.h" |
#include "radeon.h" |
#include "r100d.h" |
|
#include "r100_reg_safe.h" |
#include "rn50_reg_safe.h" |
/* This file gathers functions specific to: |
* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 |
* |
* Some of these functions might be used by newer ASICs. |
*/ |
int r200_init(struct radeon_device *rdev); |
void r100_hdp_reset(struct radeon_device *rdev); |
void r100_gpu_init(struct radeon_device *rdev); |
int r100_gui_wait_for_idle(struct radeon_device *rdev); |
46,6 → 50,7 |
void r100_gpu_wait_for_vsync2(struct radeon_device *rdev); |
int r100_debugfs_mc_info_init(struct radeon_device *rdev); |
|
|
/* |
* PCI GART |
*/ |
57,23 → 62,28 |
* could end up in wrong address. */ |
} |
|
int r100_pci_gart_enable(struct radeon_device *rdev) |
int r100_pci_gart_init(struct radeon_device *rdev) |
{ |
uint32_t tmp; |
int r; |
|
if (rdev->gart.table.ram.ptr) { |
WARN(1, "R100 PCI GART already initialized.\n"); |
return 0; |
} |
/* Initialize common gart structure */ |
r = radeon_gart_init(rdev); |
if (r) { |
if (r) |
return r; |
} |
if (rdev->gart.table.ram.ptr == NULL) { |
rdev->gart.table_size = rdev->gart.num_gpu_pages * 4; |
r = radeon_gart_table_ram_alloc(rdev); |
if (r) { |
return r; |
rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush; |
rdev->asic->gart_set_page = &r100_pci_gart_set_page; |
return radeon_gart_table_ram_alloc(rdev); |
} |
} |
|
int r100_pci_gart_enable(struct radeon_device *rdev) |
{ |
uint32_t tmp; |
|
/* discard memory request outside of configured range */ |
tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS; |
WREG32(RADEON_AIC_CNTL, tmp); |
104,24 → 114,21 |
WREG32(RADEON_AIC_HI_ADDR, 0); |
} |
|
|
int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) |
{ |
if (i < 0 || i > rdev->gart.num_gpu_pages) { |
return -EINVAL; |
} |
rdev->gart.table.ram.ptr[i] = cpu_to_le32((uint32_t)addr); |
rdev->gart.table.ram.ptr[i] = cpu_to_le32(lower_32_bits(addr)); |
return 0; |
} |
|
int r100_gart_enable(struct radeon_device *rdev) |
void r100_pci_gart_fini(struct radeon_device *rdev) |
{ |
if (rdev->flags & RADEON_IS_AGP) { |
r100_pci_gart_disable(rdev); |
return 0; |
radeon_gart_table_ram_free(rdev); |
radeon_gart_fini(rdev); |
} |
return r100_pci_gart_enable(rdev); |
} |
|
|
/* |
173,8 → 180,12 |
DRM_ERROR("Failed to register debugfs file for R100 MC !\n"); |
} |
/* Write VRAM size in case we are limiting it */ |
WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size); |
tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1; |
WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size); |
/* Novell bug 204882 for RN50/M6/M7 with 8/16/32MB VRAM, |
* if the aperture is 64MB but we have 32MB VRAM |
* we report only 32MB VRAM but we have to set MC_FB_LOCATION |
* to 64MB, otherwise the gpu accidentially dies */ |
tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; |
tmp = REG_SET(RADEON_MC_FB_TOP, tmp >> 16); |
tmp |= REG_SET(RADEON_MC_FB_START, rdev->mc.vram_location >> 16); |
WREG32(RADEON_MC_FB_LOCATION, tmp); |
215,18 → 226,7 |
r100_pci_gart_disable(rdev); |
|
/* Setup GPU memory space */ |
rdev->mc.vram_location = 0xFFFFFFFFUL; |
rdev->mc.gtt_location = 0xFFFFFFFFUL; |
if (rdev->flags & RADEON_IS_AGP) { |
r = radeon_agp_init(rdev); |
if (r) { |
printk(KERN_WARNING "[drm] Disabling AGP\n"); |
rdev->flags &= ~RADEON_IS_AGP; |
rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; |
} else { |
rdev->mc.gtt_location = rdev->mc.agp_base; |
} |
} |
r = radeon_mc_setup(rdev); |
if (r) { |
return r; |
244,11 → 244,17 |
|
void r100_mc_fini(struct radeon_device *rdev)
{
	/* Tear down the memory controller path: stop PCI GART translation.
	 * GART table/struct teardown is intentionally disabled in this port. */
	r100_pci_gart_disable(rdev);
//   radeon_gart_table_ram_free(rdev);
//   radeon_gart_fini(rdev);
}
|
u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc) |
{ |
if (crtc == 0) |
return RREG32(RADEON_CRTC_CRNT_FRAME); |
else |
return RREG32(RADEON_CRTC2_CRNT_FRAME); |
} |
|
|
/* |
* Fence emission |
*/ |
297,14 → 303,21 |
return r; |
} |
} |
WREG32(0x774, rdev->wb.gpu_addr); |
WREG32(0x70C, rdev->wb.gpu_addr + 1024); |
WREG32(0x770, 0xff); |
WREG32(R_000774_SCRATCH_ADDR, rdev->wb.gpu_addr); |
WREG32(R_00070C_CP_RB_RPTR_ADDR, |
S_00070C_RB_RPTR_ADDR((rdev->wb.gpu_addr + 1024) >> 2)); |
WREG32(R_000770_SCRATCH_UMSK, 0xff); |
return 0; |
} |
|
void r100_wb_disable(struct radeon_device *rdev)
{
	/* Clear the scratch write-back mask so no scratch register
	 * writes are mirrored to system memory anymore. */
	WREG32(R_000770_SCRATCH_UMSK, 0);
}
|
void r100_wb_fini(struct radeon_device *rdev) |
{ |
r100_wb_disable(rdev); |
if (rdev->wb.wb_obj) { |
// radeon_object_kunmap(rdev->wb.wb_obj); |
// radeon_object_unpin(rdev->wb.wb_obj); |
314,7 → 327,6 |
} |
} |
|
|
int r100_copy_blit(struct radeon_device *rdev, |
uint64_t src_offset, |
uint64_t dst_offset, |
393,6 → 405,21 |
/* |
* CP |
*/ |
static int r100_cp_wait_for_idle(struct radeon_device *rdev) |
{ |
unsigned i; |
u32 tmp; |
|
for (i = 0; i < rdev->usec_timeout; i++) { |
tmp = RREG32(R_000E40_RBBM_STATUS); |
if (!G_000E40_CP_CMDSTRM_BUSY(tmp)) { |
return 0; |
} |
udelay(1); |
} |
return -1; |
} |
|
void r100_ring_start(struct radeon_device *rdev) |
{ |
int r; |
483,6 → 510,12 |
} |
} |
|
/* Stub: CP microcode loading is not implemented in this port; report
 * success so r100_cp_init() can proceed with the existing firmware state. */
static int r100_cp_init_microcode(struct radeon_device *rdev)
{
	return 0;
}
|
|
int r100_cp_init(struct radeon_device *rdev, unsigned ring_size) |
{ |
unsigned rb_bufsz; |
517,6 → 550,15 |
} else { |
DRM_INFO("radeon: cp idle (0x%08X)\n", tmp); |
} |
|
if (!rdev->me_fw) { |
r = r100_cp_init_microcode(rdev); |
if (r) { |
DRM_ERROR("Failed to load firmware!\n"); |
return r; |
} |
} |
|
/* Align ring size */ |
rb_bufsz = drm_order(ring_size / 8); |
ring_size = (1 << (rb_bufsz + 1)) * 4; |
588,12 → 630,13 |
return 0; |
} |
|
|
void r100_cp_fini(struct radeon_device *rdev) |
{ |
if (r100_cp_wait_for_idle(rdev)) { |
DRM_ERROR("Wait for CP idle timeout, shutting down CP.\n"); |
} |
/* Disable ring */ |
rdev->cp.ready = false; |
WREG32(RADEON_CP_CSQ_CNTL, 0); |
r100_cp_disable(rdev); |
radeon_ring_fini(rdev); |
DRM_INFO("radeon: cp finalized\n"); |
} |
610,7 → 653,6 |
} |
} |
|
|
int r100_cp_reset(struct radeon_device *rdev) |
{ |
uint32_t tmp; |
617,9 → 659,8 |
bool reinit_cp; |
int i; |
|
dbgprintf("%s\n",__FUNCTION__); |
ENTER(); |
|
|
reinit_cp = rdev->cp.ready; |
rdev->cp.ready = false; |
WREG32(RADEON_CP_CSQ_MODE, 0); |
647,6 → 688,13 |
return -1; |
} |
|
void r100_cp_commit(struct radeon_device *rdev)
{
	/* Publish the new ring write pointer to the CP. */
	WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr);
	/* Read the register back to flush the posted MMIO write. */
	(void)RREG32(RADEON_CP_RB_WPTR);
}
|
|
#if 0 |
/* |
* CS functions |
725,7 → 773,7 |
unsigned idx) |
{ |
struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx]; |
uint32_t header = ib_chunk->kdata[idx]; |
uint32_t header; |
|
if (idx >= ib_chunk->length_dw) { |
DRM_ERROR("Can not parse packet at %d after CS end %d !\n", |
732,6 → 780,7 |
idx, ib_chunk->length_dw); |
return -EINVAL; |
} |
header = ib_chunk->kdata[idx]; |
pkt->idx = idx; |
pkt->type = CP_PACKET_GET_TYPE(header); |
pkt->count = CP_PACKET_GET_COUNT(header); |
759,6 → 808,102 |
} |
|
/**
 * r100_cs_packet_parse_vline() - parse userspace VLINE packet
 * @p: parser structure holding parsing context.
 *
 * Userspace sends a special sequence for VLINE waits:
 * PACKET0 - VLINE_START_END + value
 * PACKET0 - WAIT_UNTIL + value
 * RELOC (P3) - crtc_id in reloc.
 *
 * This function parses this and relocates the VLINE START END
 * and WAIT UNTIL packets to the correct crtc.
 * It also detects a switched off crtc and nulls out the
 * wait in that case.
 */
int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
{
	struct radeon_cs_chunk *ib_chunk;
	struct drm_mode_object *obj;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	struct radeon_cs_packet p3reloc, waitreloc;
	int crtc_id;
	int r;
	uint32_t header, h_idx, reg;

	ib_chunk = &p->chunks[p->chunk_ib_idx];

	/* parse the wait until */
	r = r100_cs_packet_parse(p, &waitreloc, p->idx);
	if (r)
		return r;

	/* check it's a wait until and only 1 count */
	if (waitreloc.reg != RADEON_WAIT_UNTIL ||
	    waitreloc.count != 0) {
		DRM_ERROR("vline wait had illegal wait until segment\n");
		r = -EINVAL;
		return r;
	}

	if (ib_chunk->kdata[waitreloc.idx + 1] != RADEON_WAIT_CRTC_VLINE) {
		DRM_ERROR("vline wait had illegal wait until\n");
		r = -EINVAL;
		return r;
	}

	/* jump over the NOP */
	r = r100_cs_packet_parse(p, &p3reloc, p->idx);
	if (r)
		return r;

	/* h_idx: presumably the VLINE_START_END packet header two dwords
	 * back — TODO confirm against the userspace packet sequence */
	h_idx = p->idx - 2;
	p->idx += waitreloc.count;
	p->idx += p3reloc.count;

	header = ib_chunk->kdata[h_idx];
	/* the reloc carries the crtc object id 5 dwords after the header */
	crtc_id = ib_chunk->kdata[h_idx + 5];
	reg = ib_chunk->kdata[h_idx] >> 2;
	mutex_lock(&p->rdev->ddev->mode_config.mutex);
	obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
	if (!obj) {
		DRM_ERROR("cannot find crtc %d\n", crtc_id);
		r = -EINVAL;
		goto out;
	}
	crtc = obj_to_crtc(obj);
	radeon_crtc = to_radeon_crtc(crtc);
	crtc_id = radeon_crtc->crtc_id;

	if (!crtc->enabled) {
		/* if the CRTC isn't enabled - we need to nop out the wait until */
		ib_chunk->kdata[h_idx + 2] = PACKET2(0);
		ib_chunk->kdata[h_idx + 3] = PACKET2(0);
	} else if (crtc_id == 1) {
		/* retarget the packet from crtc0 registers to crtc1 */
		switch (reg) {
		case AVIVO_D1MODE_VLINE_START_END:
			header &= R300_CP_PACKET0_REG_MASK;
			header |= AVIVO_D2MODE_VLINE_START_END >> 2;
			break;
		case RADEON_CRTC_GUI_TRIG_VLINE:
			header &= R300_CP_PACKET0_REG_MASK;
			header |= RADEON_CRTC2_GUI_TRIG_VLINE >> 2;
			break;
		default:
			DRM_ERROR("unknown crtc reloc\n");
			r = -EINVAL;
			goto out;
		}
		ib_chunk->kdata[h_idx] = header;
		ib_chunk->kdata[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1;
	}
out:
	mutex_unlock(&p->rdev->ddev->mode_config.mutex);
	return r;
}
|
/** |
* r100_cs_packet_next_reloc() - parse next packet which should be reloc packet3 |
* @parser: parser structure holding parsing context. |
* @data: pointer to relocation data |
808,33 → 953,95 |
return 0; |
} |
|
/*
 * Decode a SE_VTX_FMT word into a per-vertex dword count.
 * Every vertex carries at least 2 dwords; each enabled format bit adds
 * the size of its component.  Checks follow register-bit order.
 */
static int r100_get_vtx_size(uint32_t vtx_fmt)
{
	int size = 2;

	size += (vtx_fmt & RADEON_SE_VTX_FMT_W0)      ? 1 : 0;
	size += (vtx_fmt & RADEON_SE_VTX_FMT_FPCOLOR) ? 3 : 0;
	size += (vtx_fmt & RADEON_SE_VTX_FMT_FPALPHA) ? 1 : 0;
	size += (vtx_fmt & RADEON_SE_VTX_FMT_PKCOLOR) ? 1 : 0;
	size += (vtx_fmt & RADEON_SE_VTX_FMT_FPSPEC)  ? 3 : 0;
	size += (vtx_fmt & RADEON_SE_VTX_FMT_FPFOG)   ? 1 : 0;
	size += (vtx_fmt & RADEON_SE_VTX_FMT_PKSPEC)  ? 1 : 0;
	size += (vtx_fmt & RADEON_SE_VTX_FMT_ST0)     ? 2 : 0;
	size += (vtx_fmt & RADEON_SE_VTX_FMT_ST1)     ? 2 : 0;
	size += (vtx_fmt & RADEON_SE_VTX_FMT_Q1)      ? 1 : 0;
	size += (vtx_fmt & RADEON_SE_VTX_FMT_ST2)     ? 2 : 0;
	size += (vtx_fmt & RADEON_SE_VTX_FMT_Q2)      ? 1 : 0;
	size += (vtx_fmt & RADEON_SE_VTX_FMT_ST3)     ? 2 : 0;
	size += (vtx_fmt & RADEON_SE_VTX_FMT_Q3)      ? 1 : 0;
	size += (vtx_fmt & RADEON_SE_VTX_FMT_Q0)      ? 1 : 0;
	/* blend weight count lives in bits 15..17; adds 0 when the field is 0 */
	size += (vtx_fmt >> 15) & 0x7;
	size += (vtx_fmt & RADEON_SE_VTX_FMT_N0)      ? 3 : 0;
	size += (vtx_fmt & RADEON_SE_VTX_FMT_XY1)     ? 2 : 0;
	size += (vtx_fmt & RADEON_SE_VTX_FMT_Z1)      ? 1 : 0;
	size += (vtx_fmt & RADEON_SE_VTX_FMT_W1)      ? 1 : 0;
	size += (vtx_fmt & RADEON_SE_VTX_FMT_N1)      ? 1 : 0;
	size += (vtx_fmt & RADEON_SE_VTX_FMT_Z)       ? 1 : 0;
	return size;
}
|
static int r100_packet0_check(struct radeon_cs_parser *p, |
struct radeon_cs_packet *pkt) |
struct radeon_cs_packet *pkt, |
unsigned idx, unsigned reg) |
{ |
struct radeon_cs_chunk *ib_chunk; |
struct radeon_cs_reloc *reloc; |
struct r100_cs_track *track; |
volatile uint32_t *ib; |
uint32_t tmp; |
unsigned reg; |
unsigned i; |
unsigned idx; |
bool onereg; |
int r; |
int i, face; |
u32 tile_flags = 0; |
|
ib = p->ib->ptr; |
ib_chunk = &p->chunks[p->chunk_ib_idx]; |
idx = pkt->idx + 1; |
reg = pkt->reg; |
onereg = false; |
if (CP_PACKET0_GET_ONE_REG_WR(ib_chunk->kdata[pkt->idx])) { |
onereg = true; |
track = (struct r100_cs_track *)p->track; |
|
switch (reg) { |
case RADEON_CRTC_GUI_TRIG_VLINE: |
r = r100_cs_packet_parse_vline(p); |
if (r) { |
DRM_ERROR("No reloc for ib[%d]=0x%04X\n", |
idx, reg); |
r100_cs_dump_packet(p, pkt); |
return r; |
} |
for (i = 0; i <= pkt->count; i++, idx++, reg += 4) { |
switch (reg) { |
break; |
/* FIXME: only allow PACKET3 blit? easier to check for out of |
* range access */ |
case RADEON_DST_PITCH_OFFSET: |
case RADEON_SRC_PITCH_OFFSET: |
r = r100_reloc_pitch_offset(p, pkt, idx, reg); |
if (r) |
return r; |
break; |
case RADEON_RB3D_DEPTHOFFSET: |
r = r100_cs_packet_next_reloc(p, &reloc); |
if (r) { |
DRM_ERROR("No reloc for ib[%d]=0x%04X\n", |
842,39 → 1049,26 |
r100_cs_dump_packet(p, pkt); |
return r; |
} |
tmp = ib_chunk->kdata[idx] & 0x003fffff; |
tmp += (((u32)reloc->lobj.gpu_offset) >> 10); |
ib[idx] = (ib_chunk->kdata[idx] & 0xffc00000) | tmp; |
track->zb.robj = reloc->robj; |
track->zb.offset = ib_chunk->kdata[idx]; |
ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); |
break; |
case RADEON_RB3D_DEPTHOFFSET: |
case RADEON_RB3D_COLOROFFSET: |
case R300_RB3D_COLOROFFSET0: |
case R300_ZB_DEPTHOFFSET: |
case R200_PP_TXOFFSET_0: |
case R200_PP_TXOFFSET_1: |
case R200_PP_TXOFFSET_2: |
case R200_PP_TXOFFSET_3: |
case R200_PP_TXOFFSET_4: |
case R200_PP_TXOFFSET_5: |
r = r100_cs_packet_next_reloc(p, &reloc); |
if (r) { |
DRM_ERROR("No reloc for ib[%d]=0x%04X\n", |
idx, reg); |
r100_cs_dump_packet(p, pkt); |
return r; |
} |
track->cb[0].robj = reloc->robj; |
track->cb[0].offset = ib_chunk->kdata[idx]; |
ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); |
break; |
case RADEON_PP_TXOFFSET_0: |
case RADEON_PP_TXOFFSET_1: |
case RADEON_PP_TXOFFSET_2: |
case R300_TX_OFFSET_0: |
case R300_TX_OFFSET_0+4: |
case R300_TX_OFFSET_0+8: |
case R300_TX_OFFSET_0+12: |
case R300_TX_OFFSET_0+16: |
case R300_TX_OFFSET_0+20: |
case R300_TX_OFFSET_0+24: |
case R300_TX_OFFSET_0+28: |
case R300_TX_OFFSET_0+32: |
case R300_TX_OFFSET_0+36: |
case R300_TX_OFFSET_0+40: |
case R300_TX_OFFSET_0+44: |
case R300_TX_OFFSET_0+48: |
case R300_TX_OFFSET_0+52: |
case R300_TX_OFFSET_0+56: |
case R300_TX_OFFSET_0+60: |
i = (reg - RADEON_PP_TXOFFSET_0) / 24; |
r = r100_cs_packet_next_reloc(p, &reloc); |
if (r) { |
DRM_ERROR("No reloc for ib[%d]=0x%04X\n", |
883,16 → 1077,233 |
return r; |
} |
ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); |
track->textures[i].robj = reloc->robj; |
break; |
case RADEON_PP_CUBIC_OFFSET_T0_0: |
case RADEON_PP_CUBIC_OFFSET_T0_1: |
case RADEON_PP_CUBIC_OFFSET_T0_2: |
case RADEON_PP_CUBIC_OFFSET_T0_3: |
case RADEON_PP_CUBIC_OFFSET_T0_4: |
i = (reg - RADEON_PP_CUBIC_OFFSET_T0_0) / 4; |
r = r100_cs_packet_next_reloc(p, &reloc); |
if (r) { |
DRM_ERROR("No reloc for ib[%d]=0x%04X\n", |
idx, reg); |
r100_cs_dump_packet(p, pkt); |
return r; |
} |
track->textures[0].cube_info[i].offset = ib_chunk->kdata[idx]; |
ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); |
track->textures[0].cube_info[i].robj = reloc->robj; |
break; |
case RADEON_PP_CUBIC_OFFSET_T1_0: |
case RADEON_PP_CUBIC_OFFSET_T1_1: |
case RADEON_PP_CUBIC_OFFSET_T1_2: |
case RADEON_PP_CUBIC_OFFSET_T1_3: |
case RADEON_PP_CUBIC_OFFSET_T1_4: |
i = (reg - RADEON_PP_CUBIC_OFFSET_T1_0) / 4; |
r = r100_cs_packet_next_reloc(p, &reloc); |
if (r) { |
DRM_ERROR("No reloc for ib[%d]=0x%04X\n", |
idx, reg); |
r100_cs_dump_packet(p, pkt); |
return r; |
} |
track->textures[1].cube_info[i].offset = ib_chunk->kdata[idx]; |
ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); |
track->textures[1].cube_info[i].robj = reloc->robj; |
break; |
case RADEON_PP_CUBIC_OFFSET_T2_0: |
case RADEON_PP_CUBIC_OFFSET_T2_1: |
case RADEON_PP_CUBIC_OFFSET_T2_2: |
case RADEON_PP_CUBIC_OFFSET_T2_3: |
case RADEON_PP_CUBIC_OFFSET_T2_4: |
i = (reg - RADEON_PP_CUBIC_OFFSET_T2_0) / 4; |
r = r100_cs_packet_next_reloc(p, &reloc); |
if (r) { |
DRM_ERROR("No reloc for ib[%d]=0x%04X\n", |
idx, reg); |
r100_cs_dump_packet(p, pkt); |
return r; |
} |
track->textures[2].cube_info[i].offset = ib_chunk->kdata[idx]; |
ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); |
track->textures[2].cube_info[i].robj = reloc->robj; |
break; |
case RADEON_RE_WIDTH_HEIGHT: |
track->maxy = ((ib_chunk->kdata[idx] >> 16) & 0x7FF); |
break; |
case RADEON_RB3D_COLORPITCH: |
r = r100_cs_packet_next_reloc(p, &reloc); |
if (r) { |
DRM_ERROR("No reloc for ib[%d]=0x%04X\n", |
idx, reg); |
r100_cs_dump_packet(p, pkt); |
return r; |
} |
|
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) |
tile_flags |= RADEON_COLOR_TILE_ENABLE; |
if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) |
tile_flags |= RADEON_COLOR_MICROTILE_ENABLE; |
|
tmp = ib_chunk->kdata[idx] & ~(0x7 << 16); |
tmp |= tile_flags; |
ib[idx] = tmp; |
|
track->cb[0].pitch = ib_chunk->kdata[idx] & RADEON_COLORPITCH_MASK; |
break; |
case RADEON_RB3D_DEPTHPITCH: |
track->zb.pitch = ib_chunk->kdata[idx] & RADEON_DEPTHPITCH_MASK; |
break; |
case RADEON_RB3D_CNTL: |
switch ((ib_chunk->kdata[idx] >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) { |
case 7: |
case 8: |
case 9: |
case 11: |
case 12: |
track->cb[0].cpp = 1; |
break; |
case 3: |
case 4: |
case 15: |
track->cb[0].cpp = 2; |
break; |
case 6: |
track->cb[0].cpp = 4; |
break; |
default: |
/* FIXME: we don't want to allow anyothers packet */ |
DRM_ERROR("Invalid color buffer format (%d) !\n", |
((ib_chunk->kdata[idx] >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f)); |
return -EINVAL; |
} |
track->z_enabled = !!(ib_chunk->kdata[idx] & RADEON_Z_ENABLE); |
break; |
case RADEON_RB3D_ZSTENCILCNTL: |
switch (ib_chunk->kdata[idx] & 0xf) { |
case 0: |
track->zb.cpp = 2; |
break; |
case 2: |
case 3: |
case 4: |
case 5: |
case 9: |
case 11: |
track->zb.cpp = 4; |
break; |
default: |
break; |
} |
if (onereg) { |
/* FIXME: forbid onereg write to register on relocate */ |
break; |
case RADEON_RB3D_ZPASS_ADDR: |
r = r100_cs_packet_next_reloc(p, &reloc); |
if (r) { |
DRM_ERROR("No reloc for ib[%d]=0x%04X\n", |
idx, reg); |
r100_cs_dump_packet(p, pkt); |
return r; |
} |
ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); |
break; |
case RADEON_PP_CNTL: |
{ |
uint32_t temp = ib_chunk->kdata[idx] >> 4; |
for (i = 0; i < track->num_texture; i++) |
track->textures[i].enabled = !!(temp & (1 << i)); |
} |
break; |
case RADEON_SE_VF_CNTL: |
track->vap_vf_cntl = ib_chunk->kdata[idx]; |
break; |
case RADEON_SE_VTX_FMT: |
track->vtx_size = r100_get_vtx_size(ib_chunk->kdata[idx]); |
break; |
case RADEON_PP_TEX_SIZE_0: |
case RADEON_PP_TEX_SIZE_1: |
case RADEON_PP_TEX_SIZE_2: |
i = (reg - RADEON_PP_TEX_SIZE_0) / 8; |
track->textures[i].width = (ib_chunk->kdata[idx] & RADEON_TEX_USIZE_MASK) + 1; |
track->textures[i].height = ((ib_chunk->kdata[idx] & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1; |
break; |
case RADEON_PP_TEX_PITCH_0: |
case RADEON_PP_TEX_PITCH_1: |
case RADEON_PP_TEX_PITCH_2: |
i = (reg - RADEON_PP_TEX_PITCH_0) / 8; |
track->textures[i].pitch = ib_chunk->kdata[idx] + 32; |
break; |
case RADEON_PP_TXFILTER_0: |
case RADEON_PP_TXFILTER_1: |
case RADEON_PP_TXFILTER_2: |
i = (reg - RADEON_PP_TXFILTER_0) / 24; |
track->textures[i].num_levels = ((ib_chunk->kdata[idx] & RADEON_MAX_MIP_LEVEL_MASK) |
>> RADEON_MAX_MIP_LEVEL_SHIFT); |
tmp = (ib_chunk->kdata[idx] >> 23) & 0x7; |
if (tmp == 2 || tmp == 6) |
track->textures[i].roundup_w = false; |
tmp = (ib_chunk->kdata[idx] >> 27) & 0x7; |
if (tmp == 2 || tmp == 6) |
track->textures[i].roundup_h = false; |
break; |
case RADEON_PP_TXFORMAT_0: |
case RADEON_PP_TXFORMAT_1: |
case RADEON_PP_TXFORMAT_2: |
i = (reg - RADEON_PP_TXFORMAT_0) / 24; |
if (ib_chunk->kdata[idx] & RADEON_TXFORMAT_NON_POWER2) { |
track->textures[i].use_pitch = 1; |
} else { |
track->textures[i].use_pitch = 0; |
track->textures[i].width = 1 << ((ib_chunk->kdata[idx] >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK); |
track->textures[i].height = 1 << ((ib_chunk->kdata[idx] >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK); |
} |
if (ib_chunk->kdata[idx] & RADEON_TXFORMAT_CUBIC_MAP_ENABLE) |
track->textures[i].tex_coord_type = 2; |
switch ((ib_chunk->kdata[idx] & RADEON_TXFORMAT_FORMAT_MASK)) { |
case RADEON_TXFORMAT_I8: |
case RADEON_TXFORMAT_RGB332: |
case RADEON_TXFORMAT_Y8: |
track->textures[i].cpp = 1; |
break; |
case RADEON_TXFORMAT_AI88: |
case RADEON_TXFORMAT_ARGB1555: |
case RADEON_TXFORMAT_RGB565: |
case RADEON_TXFORMAT_ARGB4444: |
case RADEON_TXFORMAT_VYUY422: |
case RADEON_TXFORMAT_YVYU422: |
case RADEON_TXFORMAT_DXT1: |
case RADEON_TXFORMAT_SHADOW16: |
case RADEON_TXFORMAT_LDUDV655: |
case RADEON_TXFORMAT_DUDV88: |
track->textures[i].cpp = 2; |
break; |
case RADEON_TXFORMAT_ARGB8888: |
case RADEON_TXFORMAT_RGBA8888: |
case RADEON_TXFORMAT_DXT23: |
case RADEON_TXFORMAT_DXT45: |
case RADEON_TXFORMAT_SHADOW32: |
case RADEON_TXFORMAT_LDUDUV8888: |
track->textures[i].cpp = 4; |
break; |
} |
track->textures[i].cube_info[4].width = 1 << ((ib_chunk->kdata[idx] >> 16) & 0xf); |
track->textures[i].cube_info[4].height = 1 << ((ib_chunk->kdata[idx] >> 20) & 0xf); |
break; |
case RADEON_PP_CUBIC_FACES_0: |
case RADEON_PP_CUBIC_FACES_1: |
case RADEON_PP_CUBIC_FACES_2: |
tmp = ib_chunk->kdata[idx]; |
i = (reg - RADEON_PP_CUBIC_FACES_0) / 4; |
for (face = 0; face < 4; face++) { |
track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf); |
track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf); |
} |
break; |
default: |
printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n", |
reg, idx); |
return -EINVAL; |
} |
return 0; |
} |
|
920,6 → 1331,7 |
{ |
struct radeon_cs_chunk *ib_chunk; |
struct radeon_cs_reloc *reloc; |
struct r100_cs_track *track; |
unsigned idx; |
unsigned i, c; |
volatile uint32_t *ib; |
928,9 → 1340,11 |
ib = p->ib->ptr; |
ib_chunk = &p->chunks[p->chunk_ib_idx]; |
idx = pkt->idx + 1; |
track = (struct r100_cs_track *)p->track; |
switch (pkt->opcode) { |
case PACKET3_3D_LOAD_VBPNTR: |
c = ib_chunk->kdata[idx++]; |
track->num_arrays = c; |
for (i = 0; i < (c - 1); i += 2, idx += 3) { |
r = r100_cs_packet_next_reloc(p, &reloc); |
if (r) { |
940,6 → 1354,9 |
return r; |
} |
ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset); |
track->arrays[i + 0].robj = reloc->robj; |
track->arrays[i + 0].esize = ib_chunk->kdata[idx] >> 8; |
track->arrays[i + 0].esize &= 0x7F; |
r = r100_cs_packet_next_reloc(p, &reloc); |
if (r) { |
DRM_ERROR("No reloc for packet3 %d\n", |
948,6 → 1365,9 |
return r; |
} |
ib[idx+2] = ib_chunk->kdata[idx+2] + ((u32)reloc->lobj.gpu_offset); |
track->arrays[i + 1].robj = reloc->robj; |
track->arrays[i + 1].esize = ib_chunk->kdata[idx] >> 24; |
track->arrays[i + 1].esize &= 0x7F; |
} |
if (c & 1) { |
r = r100_cs_packet_next_reloc(p, &reloc); |
958,6 → 1378,9 |
return r; |
} |
ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset); |
track->arrays[i + 0].robj = reloc->robj; |
track->arrays[i + 0].esize = ib_chunk->kdata[idx] >> 8; |
track->arrays[i + 0].esize &= 0x7F; |
} |
break; |
case PACKET3_INDX_BUFFER: |
974,7 → 1397,6 |
} |
break; |
case 0x23: |
/* FIXME: cleanup */ |
/* 3D_RNDR_GEN_INDX_PRIM on r100/r200 */ |
r = r100_cs_packet_next_reloc(p, &reloc); |
if (r) { |
983,18 → 1405,71 |
return r; |
} |
ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); |
track->num_arrays = 1; |
track->vtx_size = r100_get_vtx_size(ib_chunk->kdata[idx+2]); |
|
track->arrays[0].robj = reloc->robj; |
track->arrays[0].esize = track->vtx_size; |
|
track->max_indx = ib_chunk->kdata[idx+1]; |
|
track->vap_vf_cntl = ib_chunk->kdata[idx+3]; |
track->immd_dwords = pkt->count - 1; |
r = r100_cs_track_check(p->rdev, track); |
if (r) |
return r; |
break; |
case PACKET3_3D_DRAW_IMMD: |
if (((ib_chunk->kdata[idx+1] >> 4) & 0x3) != 3) { |
DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); |
return -EINVAL; |
} |
track->vap_vf_cntl = ib_chunk->kdata[idx+1]; |
track->immd_dwords = pkt->count - 1; |
r = r100_cs_track_check(p->rdev, track); |
if (r) |
return r; |
break; |
/* triggers drawing using in-packet vertex data */ |
case PACKET3_3D_DRAW_IMMD_2: |
if (((ib_chunk->kdata[idx] >> 4) & 0x3) != 3) { |
DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); |
return -EINVAL; |
} |
track->vap_vf_cntl = ib_chunk->kdata[idx]; |
track->immd_dwords = pkt->count; |
r = r100_cs_track_check(p->rdev, track); |
if (r) |
return r; |
break; |
/* triggers drawing using in-packet vertex data */ |
case PACKET3_3D_DRAW_VBUF_2: |
track->vap_vf_cntl = ib_chunk->kdata[idx]; |
r = r100_cs_track_check(p->rdev, track); |
if (r) |
return r; |
break; |
/* triggers drawing of vertex buffers setup elsewhere */ |
case PACKET3_3D_DRAW_INDX_2: |
track->vap_vf_cntl = ib_chunk->kdata[idx]; |
r = r100_cs_track_check(p->rdev, track); |
if (r) |
return r; |
break; |
/* triggers drawing using indices to vertex buffer */ |
case PACKET3_3D_DRAW_VBUF: |
track->vap_vf_cntl = ib_chunk->kdata[idx + 1]; |
r = r100_cs_track_check(p->rdev, track); |
if (r) |
return r; |
break; |
/* triggers drawing of vertex buffers setup elsewhere */ |
case PACKET3_3D_DRAW_INDX: |
track->vap_vf_cntl = ib_chunk->kdata[idx + 1]; |
r = r100_cs_track_check(p->rdev, track); |
if (r) |
return r; |
break; |
/* triggers drawing using indices to vertex buffer */ |
case PACKET3_NOP: |
break; |
1008,8 → 1483,12 |
int r100_cs_parse(struct radeon_cs_parser *p) |
{ |
struct radeon_cs_packet pkt; |
struct r100_cs_track *track; |
int r; |
|
track = kzalloc(sizeof(*track), GFP_KERNEL); |
r100_cs_track_clear(p->rdev, track); |
p->track = track; |
do { |
r = r100_cs_packet_parse(p, &pkt, p->idx); |
if (r) { |
1018,7 → 1497,16 |
p->idx += pkt.count + 2; |
switch (pkt.type) { |
case PACKET_TYPE0: |
r = r100_packet0_check(p, &pkt); |
if (p->rdev->family >= CHIP_R200) |
r = r100_cs_parse_packet0(p, &pkt, |
p->rdev->config.r100.reg_safe_bm, |
p->rdev->config.r100.reg_safe_bm_size, |
&r200_packet0_check); |
else |
r = r100_cs_parse_packet0(p, &pkt, |
p->rdev->config.r100.reg_safe_bm, |
p->rdev->config.r100.reg_safe_bm_size, |
&r100_packet0_check); |
break; |
case PACKET_TYPE2: |
break; |
1057,8 → 1545,6 |
} |
} |
|
|
|
/* Wait for vertical sync on primary CRTC */ |
void r100_gpu_wait_for_vsync(struct radeon_device *rdev) |
{ |
1163,7 → 1649,7 |
{ |
uint32_t tmp; |
|
dbgprintf("%s\n",__FUNCTION__); |
ENTER(); |
|
tmp = RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL; |
tmp |= (7 << 28); |
1180,7 → 1666,7 |
uint32_t tmp; |
int i; |
|
dbgprintf("%s\n",__FUNCTION__); |
ENTER(); |
|
WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_E2); |
(void)RREG32(RADEON_RBBM_SOFT_RESET); |
1269,31 → 1755,117 |
} |
} |
|
void r100_vram_info(struct radeon_device *rdev) |
static u32 r100_get_accessible_vram(struct radeon_device *rdev) |
{ |
r100_vram_get_type(rdev); |
u32 aper_size; |
u8 byte; |
|
aper_size = RREG32(RADEON_CONFIG_APER_SIZE); |
|
/* Set HDP_APER_CNTL only on cards that are known not to be broken, |
* that is has the 2nd generation multifunction PCI interface |
*/ |
if (rdev->family == CHIP_RV280 || |
rdev->family >= CHIP_RV350) { |
WREG32_P(RADEON_HOST_PATH_CNTL, RADEON_HDP_APER_CNTL, |
~RADEON_HDP_APER_CNTL); |
DRM_INFO("Generation 2 PCI interface, using max accessible memory\n"); |
return aper_size * 2; |
} |
|
/* Older cards have all sorts of funny issues to deal with. First |
* check if it's a multifunction card by reading the PCI config |
* header type... Limit those to one aperture size |
*/ |
// pci_read_config_byte(rdev->pdev, 0xe, &byte); |
// if (byte & 0x80) { |
// DRM_INFO("Generation 1 PCI interface in multifunction mode\n"); |
// DRM_INFO("Limiting VRAM to one aperture\n"); |
// return aper_size; |
// } |
|
/* Single function older card. We read HDP_APER_CNTL to see how the BIOS |
* have set it up. We don't write this as it's broken on some ASICs but |
* we expect the BIOS to have done the right thing (might be too optimistic...) |
*/ |
if (RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL) |
return aper_size * 2; |
return aper_size; |
} |
|
void r100_vram_init_sizes(struct radeon_device *rdev) |
{ |
u64 config_aper_size; |
u32 accessible; |
|
config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE); |
|
if (rdev->flags & RADEON_IS_IGP) { |
uint32_t tom; |
/* read NB_TOM to get the amount of ram stolen for the GPU */ |
tom = RREG32(RADEON_NB_TOM); |
rdev->mc.vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16); |
WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size); |
rdev->mc.real_vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16); |
/* for IGPs we need to keep VRAM where it was put by the BIOS */ |
rdev->mc.vram_location = (tom & 0xffff) << 16; |
WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size); |
rdev->mc.mc_vram_size = rdev->mc.real_vram_size; |
} else { |
rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE); |
rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE); |
/* Some production boards of m6 will report 0 |
* if it's 8 MB |
*/ |
if (rdev->mc.vram_size == 0) { |
rdev->mc.vram_size = 8192 * 1024; |
WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size); |
if (rdev->mc.real_vram_size == 0) { |
rdev->mc.real_vram_size = 8192 * 1024; |
WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size); |
} |
/* let driver place VRAM */ |
rdev->mc.vram_location = 0xFFFFFFFFUL; |
/* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM - |
* Novell bug 204882 + along with lots of ubuntu ones */ |
if (config_aper_size > rdev->mc.real_vram_size) |
rdev->mc.mc_vram_size = config_aper_size; |
else |
rdev->mc.mc_vram_size = rdev->mc.real_vram_size; |
} |
|
/* work out accessible VRAM */ |
accessible = r100_get_accessible_vram(rdev); |
|
rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); |
rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); |
|
if (accessible > rdev->mc.aper_size) |
accessible = rdev->mc.aper_size; |
|
if (rdev->mc.mc_vram_size > rdev->mc.aper_size) |
rdev->mc.mc_vram_size = rdev->mc.aper_size; |
|
if (rdev->mc.real_vram_size > rdev->mc.aper_size) |
rdev->mc.real_vram_size = rdev->mc.aper_size; |
} |
|
/*
 * Toggle VGA state via CONFIG_CNTL.
 * state == true:  clear bit 9.
 * state == false: clear bit 8 and set bit 9.
 * NOTE(review): bit semantics presumed to be VGA decode enable/disable
 * per the function name — confirm against the register spec.
 */
void r100_vga_set_state(struct radeon_device *rdev, bool state)
{
	uint32_t cfg = RREG32(RADEON_CONFIG_CNTL);

	if (state) {
		cfg &= ~(1 << 9);
	} else {
		cfg &= ~(1 << 8);
		cfg |= (1 << 9);
	}
	WREG32(RADEON_CONFIG_CNTL, cfg);
}
|
/* Detect the VRAM type, then work out real/accessible VRAM sizes. */
void r100_vram_info(struct radeon_device *rdev)
{
	r100_vram_get_type(rdev);

	r100_vram_init_sizes(rdev);
}
|
|
/* |
* Indirect registers accessor |
*/ |
1350,28 → 1922,17 |
r100_pll_errata_after_data(rdev); |
} |
|
uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg) |
int r100_init(struct radeon_device *rdev) |
{ |
if (reg < 0x10000) |
return readl(((void __iomem *)rdev->rmmio) + reg); |
else { |
writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX); |
return readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA); |
if (ASIC_IS_RN50(rdev)) { |
rdev->config.r100.reg_safe_bm = rn50_reg_safe_bm; |
rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(rn50_reg_safe_bm); |
} else if (rdev->family < CHIP_R200) { |
rdev->config.r100.reg_safe_bm = r100_reg_safe_bm; |
rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(r100_reg_safe_bm); |
} else { |
return r200_init(rdev); |
} |
} |
|
void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) |
{ |
if (reg < 0x10000) |
writel(v, ((void __iomem *)rdev->rmmio) + reg); |
else { |
writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX); |
writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA); |
} |
} |
|
int r100_init(struct radeon_device *rdev) |
{ |
return 0; |
} |
|
1545,3 → 2106,611 |
return 0; |
#endif |
} |
|
/*
 * Program one surface register pair (INFO + LOWER/UPPER bounds) with
 * the given tiling flags, pitch and address range.  The tiling bit
 * encoding differs per ASIC generation, hence the family ladder.
 * Always returns 0.
 */
int r100_set_surface_reg(struct radeon_device *rdev, int reg,
			 uint32_t tiling_flags, uint32_t pitch,
			 uint32_t offset, uint32_t obj_size)
{
	int surf_index = reg * 16;
	/* pitch granularity: r100/r200 use 16-byte units, r300+ use 8 */
	int flags = (rdev->family < CHIP_R300) ? (pitch / 16) : (pitch / 8);

	if (rdev->family <= CHIP_RS200) {
		bool macro = (tiling_flags & RADEON_TILING_MACRO) != 0;
		bool micro = (tiling_flags & RADEON_TILING_MICRO) != 0;

		if (macro && micro)
			flags |= RADEON_SURF_TILE_COLOR_BOTH;
		if (macro)
			flags |= RADEON_SURF_TILE_COLOR_MACRO;
	} else if (rdev->family <= CHIP_RV280) {
		if (tiling_flags & RADEON_TILING_MACRO)
			flags |= R200_SURF_TILE_COLOR_MACRO;
		if (tiling_flags & RADEON_TILING_MICRO)
			flags |= R200_SURF_TILE_COLOR_MICRO;
	} else {
		if (tiling_flags & RADEON_TILING_MACRO)
			flags |= R300_SURF_TILE_MACRO;
		if (tiling_flags & RADEON_TILING_MICRO)
			flags |= R300_SURF_TILE_MICRO;
	}

	if (tiling_flags & RADEON_TILING_SWAP_16BIT)
		flags |= RADEON_SURF_AP0_SWP_16BPP | RADEON_SURF_AP1_SWP_16BPP;
	if (tiling_flags & RADEON_TILING_SWAP_32BIT)
		flags |= RADEON_SURF_AP0_SWP_32BPP | RADEON_SURF_AP1_SWP_32BPP;

	DRM_DEBUG("writing surface %d %d %x %x\n", reg, flags, offset, offset+obj_size-1);
	WREG32(RADEON_SURFACE0_INFO + surf_index, flags);
	WREG32(RADEON_SURFACE0_LOWER_BOUND + surf_index, offset);
	WREG32(RADEON_SURFACE0_UPPER_BOUND + surf_index, offset + obj_size - 1);
	return 0;
}
|
void r100_clear_surface_reg(struct radeon_device *rdev, int reg) |
{ |
int surf_index = reg * 16; |
WREG32(RADEON_SURFACE0_INFO + surf_index, 0); |
} |
|
/*
 * r100_bandwidth_update - program display watermarks for the active CRTCs.
 *
 * Reads memory/engine clock rates and DRAM timing parameters out of the
 * hardware registers, estimates the worst-case latency of a display memory
 * request in fixed point (fixed20_12), and programs the
 * GRPH_BUFFER_CNTL/GRPH2_BUFFER_CNTL stop/start request levels and
 * critical points accordingly. Derived from the legacy DDX watermark code.
 */
void r100_bandwidth_update(struct radeon_device *rdev)
{
	fixed20_12 trcd_ff, trp_ff, tras_ff, trbs_ff, tcas_ff;
	fixed20_12 sclk_ff, mclk_ff, sclk_eff_ff, sclk_delay_ff;
	fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff, crit_point_ff;
	uint32_t temp, data, mem_trcd, mem_trp, mem_tras;
	/* CAS latency lookup tables, indexed by the 3-bit field from
	 * MEM_SDRAM_MODE_REG; entries not listed are zero-initialized. */
	fixed20_12 memtcas_ff[8] = {
		fixed_init(1),
		fixed_init(2),
		fixed_init(3),
		fixed_init(0),
		fixed_init_half(1),
		fixed_init_half(2),
		fixed_init(0),
	};
	fixed20_12 memtcas_rs480_ff[8] = {
		fixed_init(0),
		fixed_init(1),
		fixed_init(2),
		fixed_init(3),
		fixed_init(0),
		fixed_init_half(1),
		fixed_init_half(2),
		fixed_init_half(3),
	};
	fixed20_12 memtcas2_ff[8] = {
		fixed_init(0),
		fixed_init(1),
		fixed_init(2),
		fixed_init(3),
		fixed_init(4),
		fixed_init(5),
		fixed_init(6),
		fixed_init(7),
	};
	/* Read-burst spacing tables (r3xx and r4xx variants). */
	fixed20_12 memtrbs[8] = {
		fixed_init(1),
		fixed_init_half(1),
		fixed_init(2),
		fixed_init_half(2),
		fixed_init(3),
		fixed_init_half(3),
		fixed_init(4),
		fixed_init_half(4)
	};
	fixed20_12 memtrbs_r4xx[8] = {
		fixed_init(4),
		fixed_init(5),
		fixed_init(6),
		fixed_init(7),
		fixed_init(8),
		fixed_init(9),
		fixed_init(10),
		fixed_init(11)
	};
	fixed20_12 min_mem_eff;
	fixed20_12 mc_latency_sclk, mc_latency_mclk, k1;
	fixed20_12 cur_latency_mclk, cur_latency_sclk;
	fixed20_12 disp_latency, disp_latency_overhead, disp_drain_rate,
		disp_drain_rate2, read_return_rate;
	fixed20_12 time_disp1_drop_priority;
	int c;
	int cur_size = 16;       /* in octawords */
	int critical_point = 0, critical_point2;
	/* uint32_t read_return_rate, time_disp1_drop_priority; */
	int stop_req, max_stop_req;
	struct drm_display_mode *mode1 = NULL;
	struct drm_display_mode *mode2 = NULL;
	uint32_t pixel_bytes1 = 0;
	uint32_t pixel_bytes2 = 0;

	/* NOTE(review): assumes mode_info.crtcs[0]/[1] (and their fb when
	 * enabled) are populated — confirm for single-CRTC boards. */
	if (rdev->mode_info.crtcs[0]->base.enabled) {
		mode1 = &rdev->mode_info.crtcs[0]->base.mode;
		pixel_bytes1 = rdev->mode_info.crtcs[0]->base.fb->bits_per_pixel / 8;
	}
	if (rdev->mode_info.crtcs[1]->base.enabled) {
		mode2 = &rdev->mode_info.crtcs[1]->base.mode;
		pixel_bytes2 = rdev->mode_info.crtcs[1]->base.fb->bits_per_pixel / 8;
	}

	/* Assume 0.8 memory efficiency when checking peak bandwidth below. */
	min_mem_eff.full = rfixed_const_8(0);
	/* get modes */
	if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) {
		uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER);
		mc_init_misc_lat_timer &= ~(R300_MC_DISP1R_INIT_LAT_MASK << R300_MC_DISP1R_INIT_LAT_SHIFT);
		mc_init_misc_lat_timer &= ~(R300_MC_DISP0R_INIT_LAT_MASK << R300_MC_DISP0R_INIT_LAT_SHIFT);
		/* check crtc enables */
		if (mode2)
			mc_init_misc_lat_timer |= (1 << R300_MC_DISP1R_INIT_LAT_SHIFT);
		if (mode1)
			mc_init_misc_lat_timer |= (1 << R300_MC_DISP0R_INIT_LAT_SHIFT);
		WREG32(R300_MC_INIT_MISC_LAT_TIMER, mc_init_misc_lat_timer);
	}

	/*
	 * determine is there is enough bw for current mode
	 */
	/* Clocks are stored in 10 kHz units; divide by 100 to get MHz. */
	mclk_ff.full = rfixed_const(rdev->clock.default_mclk);
	temp_ff.full = rfixed_const(100);
	mclk_ff.full = rfixed_div(mclk_ff, temp_ff);
	sclk_ff.full = rfixed_const(rdev->clock.default_sclk);
	sclk_ff.full = rfixed_div(sclk_ff, temp_ff);

	/* Bytes transferred per memory clock: bus width / 8, doubled for DDR. */
	temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1);
	temp_ff.full = rfixed_const(temp);
	mem_bw.full = rfixed_mul(mclk_ff, temp_ff);

	pix_clk.full = 0;
	pix_clk2.full = 0;
	peak_disp_bw.full = 0;
	if (mode1) {
		temp_ff.full = rfixed_const(1000);
		pix_clk.full = rfixed_const(mode1->clock); /* convert to fixed point */
		pix_clk.full = rfixed_div(pix_clk, temp_ff);
		temp_ff.full = rfixed_const(pixel_bytes1);
		peak_disp_bw.full += rfixed_mul(pix_clk, temp_ff);
	}
	if (mode2) {
		temp_ff.full = rfixed_const(1000);
		pix_clk2.full = rfixed_const(mode2->clock); /* convert to fixed point */
		pix_clk2.full = rfixed_div(pix_clk2, temp_ff);
		temp_ff.full = rfixed_const(pixel_bytes2);
		peak_disp_bw.full += rfixed_mul(pix_clk2, temp_ff);
	}

	mem_bw.full = rfixed_mul(mem_bw, min_mem_eff);
	if (peak_disp_bw.full >= mem_bw.full) {
		DRM_ERROR("You may not have enough display bandwidth for current mode\n"
			  "If you have flickering problem, try to lower resolution, refresh rate, or color depth\n");
	}

	/* Get values from the EXT_MEM_CNTL register...converting its contents. */
	temp = RREG32(RADEON_MEM_TIMING_CNTL);
	if ((rdev->family == CHIP_RV100) || (rdev->flags & RADEON_IS_IGP)) { /* RV100, M6, IGPs */
		mem_trcd = ((temp >> 2) & 0x3) + 1;
		mem_trp  = ((temp & 0x3)) + 1;
		mem_tras = ((temp & 0x70) >> 4) + 1;
	} else if (rdev->family == CHIP_R300 ||
		   rdev->family == CHIP_R350) { /* r300, r350 */
		mem_trcd = (temp & 0x7) + 1;
		mem_trp = ((temp >> 8) & 0x7) + 1;
		mem_tras = ((temp >> 11) & 0xf) + 4;
	} else if (rdev->family == CHIP_RV350 ||
		   rdev->family <= CHIP_RV380) {
		/* rv3x0 */
		mem_trcd = (temp & 0x7) + 3;
		mem_trp = ((temp >> 8) & 0x7) + 3;
		mem_tras = ((temp >> 11) & 0xf) + 6;
	} else if (rdev->family == CHIP_R420 ||
		   rdev->family == CHIP_R423 ||
		   rdev->family == CHIP_RV410) {
		/* r4xx */
		mem_trcd = (temp & 0xf) + 3;
		if (mem_trcd > 15)
			mem_trcd = 15;
		mem_trp = ((temp >> 8) & 0xf) + 3;
		if (mem_trp > 15)
			mem_trp = 15;
		mem_tras = ((temp >> 12) & 0x1f) + 6;
		if (mem_tras > 31)
			mem_tras = 31;
	} else { /* RV200, R200 */
		mem_trcd = (temp & 0x7) + 1;
		mem_trp = ((temp >> 8) & 0x7) + 1;
		mem_tras = ((temp >> 12) & 0xf) + 4;
	}
	/* convert to FF */
	trcd_ff.full = rfixed_const(mem_trcd);
	trp_ff.full = rfixed_const(mem_trp);
	tras_ff.full = rfixed_const(mem_tras);

	/* Get values from the MEM_SDRAM_MODE_REG register...converting its */
	temp = RREG32(RADEON_MEM_SDRAM_MODE_REG);
	data = (temp & (7 << 20)) >> 20;
	if ((rdev->family == CHIP_RV100) || rdev->flags & RADEON_IS_IGP) {
		if (rdev->family == CHIP_RS480) /* don't think rs400 */
			tcas_ff = memtcas_rs480_ff[data];
		else
			tcas_ff = memtcas_ff[data];
	} else
		tcas_ff = memtcas2_ff[data];

	if (rdev->family == CHIP_RS400 ||
	    rdev->family == CHIP_RS480) {
		/* extra cas latency stored in bits 23-25 0-4 clocks */
		data = (temp >> 23) & 0x7;
		if (data < 5)
			tcas_ff.full += rfixed_const(data);
	}

	if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) {
		/* on the R300, Tcas is included in Trbs.
		 */
		temp = RREG32(RADEON_MEM_CNTL);
		data = (R300_MEM_NUM_CHANNELS_MASK & temp);
		if (data == 1) {
			if (R300_MEM_USE_CD_CH_ONLY & temp) {
				temp = RREG32(R300_MC_IND_INDEX);
				temp &= ~R300_MC_IND_ADDR_MASK;
				temp |= R300_MC_READ_CNTL_CD_mcind;
				WREG32(R300_MC_IND_INDEX, temp);
				temp = RREG32(R300_MC_IND_DATA);
				data = (R300_MEM_RBS_POSITION_C_MASK & temp);
			} else {
				temp = RREG32(R300_MC_READ_CNTL_AB);
				data = (R300_MEM_RBS_POSITION_A_MASK & temp);
			}
		} else {
			temp = RREG32(R300_MC_READ_CNTL_AB);
			data = (R300_MEM_RBS_POSITION_A_MASK & temp);
		}
		if (rdev->family == CHIP_RV410 ||
		    rdev->family == CHIP_R420 ||
		    rdev->family == CHIP_R423)
			trbs_ff = memtrbs_r4xx[data];
		else
			trbs_ff = memtrbs[data];
		tcas_ff.full += trbs_ff.full;
	}

	sclk_eff_ff.full = sclk_ff.full;

	if (rdev->flags & RADEON_IS_AGP) {
		fixed20_12 agpmode_ff;
		agpmode_ff.full = rfixed_const(radeon_agpmode);
		temp_ff.full = rfixed_const_666(16);
		sclk_eff_ff.full -= rfixed_mul(agpmode_ff, temp_ff);
	}
	/* TODO PCIE lanes may affect this - agpmode == 16?? */

	if (ASIC_IS_R300(rdev)) {
		sclk_delay_ff.full = rfixed_const(250);
	} else {
		if ((rdev->family == CHIP_RV100) ||
		    rdev->flags & RADEON_IS_IGP) {
			if (rdev->mc.vram_is_ddr)
				sclk_delay_ff.full = rfixed_const(41);
			else
				sclk_delay_ff.full = rfixed_const(33);
		} else {
			if (rdev->mc.vram_width == 128)
				sclk_delay_ff.full = rfixed_const(57);
			else
				sclk_delay_ff.full = rfixed_const(41);
		}
	}

	mc_latency_sclk.full = rfixed_div(sclk_delay_ff, sclk_eff_ff);

	/* k1/c weights differ for DDR vs SDR and narrow buses. */
	if (rdev->mc.vram_is_ddr) {
		if (rdev->mc.vram_width == 32) {
			k1.full = rfixed_const(40);
			c = 3;
		} else {
			k1.full = rfixed_const(20);
			c = 1;
		}
	} else {
		k1.full = rfixed_const(40);
		c = 3;
	}

	temp_ff.full = rfixed_const(2);
	mc_latency_mclk.full = rfixed_mul(trcd_ff, temp_ff);
	temp_ff.full = rfixed_const(c);
	mc_latency_mclk.full += rfixed_mul(tcas_ff, temp_ff);
	temp_ff.full = rfixed_const(4);
	mc_latency_mclk.full += rfixed_mul(tras_ff, temp_ff);
	mc_latency_mclk.full += rfixed_mul(trp_ff, temp_ff);
	mc_latency_mclk.full += k1.full;

	mc_latency_mclk.full = rfixed_div(mc_latency_mclk, mclk_ff);
	mc_latency_mclk.full += rfixed_div(temp_ff, sclk_eff_ff);

	/*
	  HW cursor time assuming worst case of full size colour cursor.
	*/
	temp_ff.full = rfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1))));
	temp_ff.full += trcd_ff.full;
	if (temp_ff.full < tras_ff.full)
		temp_ff.full = tras_ff.full;
	cur_latency_mclk.full = rfixed_div(temp_ff, mclk_ff);

	temp_ff.full = rfixed_const(cur_size);
	cur_latency_sclk.full = rfixed_div(temp_ff, sclk_eff_ff);
	/*
	  Find the total latency for the display data.
	*/
	disp_latency_overhead.full = rfixed_const(80);
	disp_latency_overhead.full = rfixed_div(disp_latency_overhead, sclk_ff);
	mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full;
	mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full;

	if (mc_latency_mclk.full > mc_latency_sclk.full)
		disp_latency.full = mc_latency_mclk.full;
	else
		disp_latency.full = mc_latency_sclk.full;

	/* setup Max GRPH_STOP_REQ default value */
	if (ASIC_IS_RV100(rdev))
		max_stop_req = 0x5c;
	else
		max_stop_req = 0x7c;

	if (mode1) {
		/* CRTC1
		   Set GRPH_BUFFER_CNTL register using h/w defined optimal values.
		   GRPH_STOP_REQ <= MIN[ 0x7C, (CRTC_H_DISP + 1) * (bit depth) / 0x10 ]
		*/
		stop_req = mode1->hdisplay * pixel_bytes1 / 16;

		if (stop_req > max_stop_req)
			stop_req = max_stop_req;

		/*
		  Find the drain rate of the display buffer.
		*/
		temp_ff.full = rfixed_const((16/pixel_bytes1));
		disp_drain_rate.full = rfixed_div(pix_clk, temp_ff);

		/*
		  Find the critical point of the display buffer.
		*/
		crit_point_ff.full = rfixed_mul(disp_drain_rate, disp_latency);
		crit_point_ff.full += rfixed_const_half(0);

		critical_point = rfixed_trunc(crit_point_ff);

		if (rdev->disp_priority == 2) {
			critical_point = 0;
		}

		/*
		  The critical point should never be above max_stop_req-4.  Setting
		  GRPH_CRITICAL_CNTL = 0 will thus force high priority all the time.
		*/
		if (max_stop_req - critical_point < 4)
			critical_point = 0;

		if (critical_point == 0 && mode2 && rdev->family == CHIP_R300) {
			/* some R300 cards have problem with this set to 0, when CRTC2 is enabled.*/
			critical_point = 0x10;
		}

		temp = RREG32(RADEON_GRPH_BUFFER_CNTL);
		temp &= ~(RADEON_GRPH_STOP_REQ_MASK);
		temp |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT);
		temp &= ~(RADEON_GRPH_START_REQ_MASK);
		if ((rdev->family == CHIP_R350) &&
		    (stop_req > 0x15)) {
			stop_req -= 0x10;
		}
		temp |= (stop_req << RADEON_GRPH_START_REQ_SHIFT);
		temp |= RADEON_GRPH_BUFFER_SIZE;
		temp &= ~(RADEON_GRPH_CRITICAL_CNTL |
			  RADEON_GRPH_CRITICAL_AT_SOF |
			  RADEON_GRPH_STOP_CNTL);
		/*
		  Write the result into the register.
		*/
		WREG32(RADEON_GRPH_BUFFER_CNTL, ((temp & ~RADEON_GRPH_CRITICAL_POINT_MASK) |
						 (critical_point << RADEON_GRPH_CRITICAL_POINT_SHIFT)));

#if 0
		if ((rdev->family == CHIP_RS400) ||
		    (rdev->family == CHIP_RS480)) {
			/* attempt to program RS400 disp regs correctly ??? */
			temp = RREG32(RS400_DISP1_REG_CNTL);
			temp &= ~(RS400_DISP1_START_REQ_LEVEL_MASK |
				  RS400_DISP1_STOP_REQ_LEVEL_MASK);
			WREG32(RS400_DISP1_REQ_CNTL1, (temp |
						       (critical_point << RS400_DISP1_START_REQ_LEVEL_SHIFT) |
						       (critical_point << RS400_DISP1_STOP_REQ_LEVEL_SHIFT)));
			temp = RREG32(RS400_DMIF_MEM_CNTL1);
			temp &= ~(RS400_DISP1_CRITICAL_POINT_START_MASK |
				  RS400_DISP1_CRITICAL_POINT_STOP_MASK);
			WREG32(RS400_DMIF_MEM_CNTL1, (temp |
						      (critical_point << RS400_DISP1_CRITICAL_POINT_START_SHIFT) |
						      (critical_point << RS400_DISP1_CRITICAL_POINT_STOP_SHIFT)));
		}
#endif

		DRM_DEBUG("GRPH_BUFFER_CNTL from to %x\n",
			  /*      (unsigned int)info->SavedReg->grph_buffer_cntl, */
			  (unsigned int)RREG32(RADEON_GRPH_BUFFER_CNTL));
	}

	if (mode2) {
		u32 grph2_cntl;
		stop_req = mode2->hdisplay * pixel_bytes2 / 16;

		if (stop_req > max_stop_req)
			stop_req = max_stop_req;

		/*
		  Find the drain rate of the display buffer.
		*/
		temp_ff.full = rfixed_const((16/pixel_bytes2));
		disp_drain_rate2.full = rfixed_div(pix_clk2, temp_ff);

		grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL);
		grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK);
		grph2_cntl |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT);
		grph2_cntl &= ~(RADEON_GRPH_START_REQ_MASK);
		if ((rdev->family == CHIP_R350) &&
		    (stop_req > 0x15)) {
			stop_req -= 0x10;
		}
		grph2_cntl |= (stop_req << RADEON_GRPH_START_REQ_SHIFT);
		grph2_cntl |= RADEON_GRPH_BUFFER_SIZE;
		grph2_cntl &= ~(RADEON_GRPH_CRITICAL_CNTL |
			  RADEON_GRPH_CRITICAL_AT_SOF |
			  RADEON_GRPH_STOP_CNTL);

		if ((rdev->family == CHIP_RS100) ||
		    (rdev->family == CHIP_RS200))
			critical_point2 = 0;
		else {
			/* NOTE(review): precedence here computes
			 * (vram_width * vram_is_ddr) + 1; the legacy DDX used
			 * vram_width * (vram_is_ddr + 1) — confirm intent. */
			temp = (rdev->mc.vram_width * rdev->mc.vram_is_ddr + 1)/128;
			temp_ff.full = rfixed_const(temp);
			temp_ff.full = rfixed_mul(mclk_ff, temp_ff);
			if (sclk_ff.full < temp_ff.full)
				temp_ff.full = sclk_ff.full;

			read_return_rate.full = temp_ff.full;

			if (mode1) {
				temp_ff.full = read_return_rate.full - disp_drain_rate.full;
				time_disp1_drop_priority.full = rfixed_div(crit_point_ff, temp_ff);
			} else {
				time_disp1_drop_priority.full = 0;
			}
			crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full;
			crit_point_ff.full = rfixed_mul(crit_point_ff, disp_drain_rate2);
			crit_point_ff.full += rfixed_const_half(0);

			critical_point2 = rfixed_trunc(crit_point_ff);

			if (rdev->disp_priority == 2) {
				critical_point2 = 0;
			}

			if (max_stop_req - critical_point2 < 4)
				critical_point2 = 0;

		}

		if (critical_point2 == 0 && rdev->family == CHIP_R300) {
			/* some R300 cards have problem with this set to 0 */
			critical_point2 = 0x10;
		}

		WREG32(RADEON_GRPH2_BUFFER_CNTL, ((grph2_cntl & ~RADEON_GRPH_CRITICAL_POINT_MASK) |
						  (critical_point2 << RADEON_GRPH_CRITICAL_POINT_SHIFT)));

		if ((rdev->family == CHIP_RS400) ||
		    (rdev->family == CHIP_RS480)) {
#if 0
			/* attempt to program RS400 disp2 regs correctly ??? */
			temp = RREG32(RS400_DISP2_REQ_CNTL1);
			temp &= ~(RS400_DISP2_START_REQ_LEVEL_MASK |
				  RS400_DISP2_STOP_REQ_LEVEL_MASK);
			WREG32(RS400_DISP2_REQ_CNTL1, (temp |
						       (critical_point2 << RS400_DISP1_START_REQ_LEVEL_SHIFT) |
						       (critical_point2 << RS400_DISP1_STOP_REQ_LEVEL_SHIFT)));
			temp = RREG32(RS400_DISP2_REQ_CNTL2);
			temp &= ~(RS400_DISP2_CRITICAL_POINT_START_MASK |
				  RS400_DISP2_CRITICAL_POINT_STOP_MASK);
			WREG32(RS400_DISP2_REQ_CNTL2, (temp |
						       (critical_point2 << RS400_DISP2_CRITICAL_POINT_START_SHIFT) |
						       (critical_point2 << RS400_DISP2_CRITICAL_POINT_STOP_SHIFT)));
#endif
			WREG32(RS400_DISP2_REQ_CNTL1, 0x105DC1CC);
			WREG32(RS400_DISP2_REQ_CNTL2, 0x2749D000);
			WREG32(RS400_DMIF_MEM_CNTL1,  0x29CA71DC);
			WREG32(RS400_DISP1_REQ_CNTL1, 0x28FBC3AC);
		}

		DRM_DEBUG("GRPH2_BUFFER_CNTL from to %x\n",
			  (unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL));
	}
}
|
|
|
|
/*
 * r100_mc_stop - quiesce display and CP before reprogramming the MC.
 * @save: receives the CRTC register state so r100_mc_resume() can restore it.
 *
 * Stops the CP, saves the VGA/CRTC control registers, then disables VGA
 * aperture access, cursors, overlay and CRTC memory requests so the memory
 * controller can be safely reprogrammed. The write order below is
 * deliberate (lock cursor before touching CRTC enables); do not reorder.
 */
void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save)
{
	/* Shutdown CP we shouldn't need to do that but better be safe than
	 * sorry
	 */
	rdev->cp.ready = false;
	WREG32(R_000740_CP_CSQ_CNTL, 0);

	/* Save few CRTC registers */
	save->GENMO_WT = RREG32(R_0003C0_GENMO_WT);
	save->CRTC_EXT_CNTL = RREG32(R_000054_CRTC_EXT_CNTL);
	save->CRTC_GEN_CNTL = RREG32(R_000050_CRTC_GEN_CNTL);
	save->CUR_OFFSET = RREG32(R_000260_CUR_OFFSET);
	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
		save->CRTC2_GEN_CNTL = RREG32(R_0003F8_CRTC2_GEN_CNTL);
		save->CUR2_OFFSET = RREG32(R_000360_CUR2_OFFSET);
	}

	/* Disable VGA aperture access */
	WREG32(R_0003C0_GENMO_WT, C_0003C0_VGA_RAM_EN & save->GENMO_WT);
	/* Disable cursor, overlay, crtc */
	WREG32(R_000260_CUR_OFFSET, save->CUR_OFFSET | S_000260_CUR_LOCK(1));
	WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL |
					S_000054_CRTC_DISPLAY_DIS(1));
	WREG32(R_000050_CRTC_GEN_CNTL,
			(C_000050_CRTC_CUR_EN & save->CRTC_GEN_CNTL) |
			S_000050_CRTC_DISP_REQ_EN_B(1));
	WREG32(R_000420_OV0_SCALE_CNTL,
		C_000420_OV0_OVERLAY_EN & RREG32(R_000420_OV0_SCALE_CNTL));
	WREG32(R_000260_CUR_OFFSET, C_000260_CUR_LOCK & save->CUR_OFFSET);
	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
		WREG32(R_000360_CUR2_OFFSET, save->CUR2_OFFSET |
						S_000360_CUR2_LOCK(1));
		WREG32(R_0003F8_CRTC2_GEN_CNTL,
			(C_0003F8_CRTC2_CUR_EN & save->CRTC2_GEN_CNTL) |
			S_0003F8_CRTC2_DISPLAY_DIS(1) |
			S_0003F8_CRTC2_DISP_REQ_EN_B(1));
		WREG32(R_000360_CUR2_OFFSET,
			C_000360_CUR2_LOCK & save->CUR2_OFFSET);
	}
}
|
/*
 * r100_mc_resume - undo r100_mc_stop() after MC reprogramming.
 * @save: CRTC register state captured by r100_mc_stop().
 *
 * Points both CRTCs at the (possibly relocated) VRAM base and restores the
 * saved VGA/CRTC control registers. Note the CP is not restarted here.
 */
void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save)
{
	/* Update base address for crtc */
	WREG32(R_00023C_DISPLAY_BASE_ADDR, rdev->mc.vram_location);
	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
		WREG32(R_00033C_CRTC2_DISPLAY_BASE_ADDR,
				rdev->mc.vram_location);
	}
	/* Restore CRTC registers */
	WREG32(R_0003C0_GENMO_WT, save->GENMO_WT);
	WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL);
	WREG32(R_000050_CRTC_GEN_CNTL, save->CRTC_GEN_CNTL);
	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
		WREG32(R_0003F8_CRTC2_GEN_CNTL, save->CRTC2_GEN_CNTL);
	}
}
|
/*
 * drm_order - compute ceil(log2(size)).
 * Returns the smallest order such that (1UL << order) >= size.
 * drm_order(0) and drm_order(1) both return 0, matching the historical
 * DRM helper of the same name.
 */
int drm_order(unsigned long size)
{
	int order = 0;

	/* Find the position of the most significant bit. */
	while ((size >> order) > 1)
		order++;

	/* Round up when size is not a power of two. */
	if (size & (size - 1))
		order++;

	return order;
}
|