Subversion Repositories: Kolibri OS

Compare Revisions

Rev 1128 → Rev 1126

/drivers/video/drm/include/types.h
133,7 → 133,6
void* memcpy(void *s1, const void *s2, size_t n);
void* memset(void *s, int c, size_t n);
size_t strlen(const char *s);
char *strcpy(char *s1, const char *s2);
char *strncpy (char *dst, const char *src, size_t len);
 
void *malloc(size_t size);
/drivers/video/drm/radeon/rs400.c
File deleted
/drivers/video/drm/radeon/r600.c
File deleted
/drivers/video/drm/radeon/rs600.c
File deleted
/drivers/video/drm/radeon/rs690.c
File deleted
/drivers/video/drm/radeon/atombios_crtc.c
337,7 → 337,7
// return -EINVAL;
//}
 
fb_location = 0; //rdev->mc.vram_location;
fb_location = rdev->mc.vram_location;
 
dbgprintf("fb_location %x\n", fb_location);
dbgprintf("bpp %x\n", crtc->fb->bits_per_pixel);
708,5 → 708,3
#endif
WREG32(AVIVO_DC_LB_MEMORY_SPLIT, dc_lb_memory_split);
}
 
 
/drivers/video/drm/radeon/r300.c
173,10 → 173,10
rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
return rv370_pcie_gart_enable(rdev);
}
return r100_pci_gart_enable(rdev);
// return r100_pci_gart_enable(rdev);
}
 
 
#if 0
/*
* MC
*/
184,9 → 184,9
{
int r;
 
// if (r100_debugfs_rbbm_init(rdev)) {
// DRM_ERROR("Failed to register debugfs file for RBBM !\n");
// }
if (r100_debugfs_rbbm_init(rdev)) {
DRM_ERROR("Failed to register debugfs file for RBBM !\n");
}
 
r300_gpu_init(rdev);
r100_pci_gart_disable(rdev);
264,8 → 264,6
}
 
 
#if 0
 
/*
* Global GPU functions
*/
313,8 → 311,6
return r;
}
 
#endif
 
void r300_ring_start(struct radeon_device *rdev)
{
unsigned gb_tile_config;
718,7 → 714,6
}
 
 
#if 0
/*
* CS functions
*/
973,8 → 968,6
}
}
 
#endif
 
static const unsigned r300_reg_safe_bm[159] = {
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
0xFFFFFFBF, 0xFFFFFFFF, 0xFFFFFFBF, 0xFFFFFFFF,
1018,8 → 1011,6
0x0003FC01, 0xFFFFFFF8, 0xFE800B19,
};
 
#if 0
 
static int r300_packet0_check(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
unsigned idx, unsigned reg)
1533,8 → 1524,6
return 0;
}
 
#endif
 
int r300_init(struct radeon_device *rdev)
{
rdev->config.r300.reg_safe_bm = r300_reg_safe_bm;
1543,3 → 1532,4
}
 
 
#endif
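
The r300_reg_safe_bm[] table above is a register-permission bitmap consumed by the (here #if 0'd) command-stream checker r300_packet0_check(). The diff does not show how it is indexed; the following is only a minimal sketch, assuming one bit per 32-bit register (word = offset >> 7, bit = (offset >> 2) & 31) and that a set bit marks the register as safe to write.

#include <stdint.h>
#include <stdbool.h>

/* Sketch only: test a register offset against a safe-register bitmap laid out
 * as one bit per 32-bit register. The indexing here is an assumption; the
 * actual check lives in r300_packet0_check(), which this diff compiles out. */
static bool r300_reg_is_safe(const unsigned *safe_bm, unsigned bm_words, uint32_t reg)
{
    uint32_t word = reg >> 7;            /* (reg >> 2) / 32 */
    uint32_t bit  = (reg >> 2) & 31;

    if (word >= bm_words)
        return false;                    /* offset beyond the table: not safe */
    return (safe_bm[word] & (1u << bit)) != 0;
}
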
/drivers/video/drm/radeon/radeon.h
60,10 → 60,7
extern int radeon_modeset;
extern int radeon_dynclks;
extern int radeon_r4xx_atom;
extern int radeon_agpmode;
extern int radeon_vram_limit;
extern int radeon_gart_size;
extern int radeon_benchmarking;
extern int radeon_connector_table;
 
/*
617,15 → 614,9
*(volatile uint32_t __force *) addr = b;
}
 
static inline void __raw_writeq(__u64 b, volatile void __iomem *addr)
{
*(volatile __u64 *)addr = b;
}
 
#define writeb __raw_writeb
#define writew __raw_writew
#define writel __raw_writel
#define writeq __raw_writeq
 
//#define writeb(b,addr) *(volatile uint8_t* ) addr = (uint8_t)b
//#define writew(b,addr) *(volatile uint16_t*) addr = (uint16_t)b
1210,6 → 1201,5
resource_size_t
drm_get_resource_len(struct drm_device *dev, unsigned int resource);
 
bool set_mode(struct drm_device *dev, int width, int height);
 
#endif
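
The __raw_write* helpers above are plain volatile stores into the ioremapped register BAR, and higher-level register writes are layered on top of them. Below is a minimal sketch of that layering, assuming a base pointer to the mapped register aperture; the helper name is illustrative and not part of this revision.

#include <stdint.h>

/* Sketch only: a 32-bit MMIO register write built on the volatile-store
 * accessors above. 'mmio' stands for the ioremapped register BAR. */
static inline void mmio_wreg32(volatile void *mmio, uint32_t reg, uint32_t val)
{
    *(volatile uint32_t *)((volatile uint8_t *)mmio + reg) = val;
}

/* Usage (illustrative): mmio_wreg32(mmio_base, AVIVO_D1VGA_CONTROL, 0); */
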
/drivers/video/drm/radeon/radeon_device.c
250,8 → 250,8
 
/* Don't change order as we are overriding accessors. */
if (rdev->family < CHIP_RV515) {
rdev->pcie_rreg = &rv370_pcie_rreg;
rdev->pcie_wreg = &rv370_pcie_wreg;
// rdev->pcie_rreg = &rv370_pcie_rreg;
// rdev->pcie_wreg = &rv370_pcie_wreg;
}
if (rdev->family >= CHIP_RV515) {
rdev->pcie_rreg = &rv515_pcie_rreg;
267,20 → 267,20
rdev->mc_wreg = &rv515_mc_wreg;
}
if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) {
rdev->mc_rreg = &rs400_mc_rreg;
rdev->mc_wreg = &rs400_mc_wreg;
// rdev->mc_rreg = &rs400_mc_rreg;
// rdev->mc_wreg = &rs400_mc_wreg;
}
if (rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
rdev->mc_rreg = &rs690_mc_rreg;
rdev->mc_wreg = &rs690_mc_wreg;
// rdev->mc_rreg = &rs690_mc_rreg;
// rdev->mc_wreg = &rs690_mc_wreg;
}
if (rdev->family == CHIP_RS600) {
rdev->mc_rreg = &rs600_mc_rreg;
rdev->mc_wreg = &rs600_mc_wreg;
// rdev->mc_rreg = &rs600_mc_rreg;
// rdev->mc_wreg = &rs600_mc_wreg;
}
if (rdev->family >= CHIP_R600) {
rdev->pciep_rreg = &r600_pciep_rreg;
rdev->pciep_wreg = &r600_pciep_wreg;
// rdev->pciep_rreg = &r600_pciep_rreg;
// rdev->pciep_wreg = &r600_pciep_wreg;
}
}
 
304,32 → 304,32
case CHIP_RV250:
case CHIP_RS300:
case CHIP_RV280:
rdev->asic = &r100_asic;
// rdev->asic = &r100_asic;
break;
case CHIP_R300:
case CHIP_R350:
case CHIP_RV350:
case CHIP_RV380:
rdev->asic = &r300_asic;
// rdev->asic = &r300_asic;
break;
case CHIP_R420:
case CHIP_R423:
case CHIP_RV410:
rdev->asic = &r420_asic;
// rdev->asic = &r420_asic;
break;
case CHIP_RS400:
case CHIP_RS480:
rdev->asic = &rs400_asic;
// rdev->asic = &rs400_asic;
break;
case CHIP_RS600:
rdev->asic = &rs600_asic;
// rdev->asic = &rs600_asic;
break;
case CHIP_RS690:
case CHIP_RS740:
rdev->asic = &rs690_asic;
// rdev->asic = &rs690_asic;
break;
case CHIP_RV515:
rdev->asic = &rv515_asic;
// rdev->asic = &rv515_asic;
break;
case CHIP_R520:
case CHIP_RV530:
454,7 → 454,7
 
int radeon_combios_init(struct radeon_device *rdev)
{
radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
// radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
return 0;
}
 
869,8 → 869,9
// driver->name, driver->major, driver->minor, driver->patchlevel,
// driver->date, pci_name(pdev), dev->primary->index);
 
set_mode(dev, 1024, 768);
drm_helper_resume_force_mode(dev);
 
 
return 0;
 
err_g4:
930,5 → 931,3
return rem;
}
 
 
 
/drivers/video/drm/radeon/radeon_fb.c
1173,101 → 1173,3
#undef BYTES_PER_LONG
}
 
static char *manufacturer_name(unsigned char *x)
{
static char name[4];
 
name[0] = ((x[0] & 0x7C) >> 2) + '@';
name[1] = ((x[0] & 0x03) << 3) + ((x[1] & 0xE0) >> 5) + '@';
name[2] = (x[1] & 0x1F) + '@';
name[3] = 0;
 
return name;
}
 
 
bool set_mode(struct drm_device *dev, int width, int height)
{
struct drm_connector *connector;
 
bool ret;
 
ENTRY();
 
list_for_each_entry(connector, &dev->mode_config.connector_list, head)
{
struct drm_display_mode *mode;
 
struct drm_encoder *encoder;
struct drm_crtc *crtc;
 
if( connector->status != connector_status_connected)
continue;
 
encoder = connector->encoder;
if( encoder == NULL)
continue;
 
crtc = encoder->crtc;
 
if(crtc == NULL)
continue;
 
list_for_each_entry(mode, &connector->modes, head)
{
char *con_name, *enc_name;
 
struct drm_framebuffer *fb;
 
if (drm_mode_width(mode) == width &&
drm_mode_height(mode) == height)
{
char con_edid[128];
 
fb = list_first_entry(&dev->mode_config.fb_kernel_list,
struct drm_framebuffer, filp_head);
 
memcpy(con_edid, connector->edid_blob_ptr->data, 128);
 
dbgprintf("Manufacturer: %s Model %x Serial Number %u\n",
manufacturer_name(con_edid + 0x08),
(unsigned short)(con_edid[0x0A] + (con_edid[0x0B] << 8)),
(unsigned int)(con_edid[0x0C] + (con_edid[0x0D] << 8)
+ (con_edid[0x0E] << 16) + (con_edid[0x0F] << 24)));
 
 
con_name = drm_get_connector_name(connector);
enc_name = drm_get_encoder_name(encoder);
 
dbgprintf("set mode %d %d connector %s encoder %s\n",
width, height, con_name, enc_name);
 
fb->width = width;
fb->height = height;
fb->pitch = radeon_align_pitch(dev->dev_private, width, 32)
* ((32 + 1) / 8);
 
crtc->fb = fb;
 
ret = drm_crtc_helper_set_mode(crtc, mode, 0, 0, fb);
 
sysSetScreen(width,height);
 
if (ret == true)
{
}
else
{
DRM_ERROR("failed to set mode %d_%d on crtc %p\n",
width, height, crtc);
};
 
return ret;
};
}
};
 
return false;
};
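
manufacturer_name() above unpacks the three 5-bit letters of the PNP vendor ID stored in EDID bytes 0x08-0x09, which is why set_mode() passes con_edid + 0x08. A standalone sketch of the same decoding with an example input value chosen for illustration:

#include <stdio.h>

/* Same bit layout as manufacturer_name() above: three 5-bit letters, 'A' = 1. */
static void decode_pnp_id(const unsigned char *x, char name[4])
{
    name[0] = ((x[0] & 0x7C) >> 2) + '@';
    name[1] = ((x[0] & 0x03) << 3) + ((x[1] & 0xE0) >> 5) + '@';
    name[2] = (x[1] & 0x1F) + '@';
    name[3] = 0;
}

int main(void)
{
    unsigned char vendor[2] = { 0x10, 0xAC };  /* example EDID bytes 8-9 */
    char name[4];

    decode_pnp_id(vendor, name);
    printf("%s\n", name);                      /* prints "DEL" */
    return 0;
}
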
 
 
/drivers/video/drm/radeon/radeon_object.c
769,8 → 769,6
return ttm_fbdev_mmap(vma, &robj->tobj);
}
 
#endif
 
unsigned long radeon_object_size(struct radeon_object *robj)
{
return robj->tobj.num_pages << PAGE_SHIFT;
777,3 → 775,4
}
 
 
#endif
/drivers/video/drm/radeon/r100.c
46,6 → 46,7
void r100_gpu_wait_for_vsync2(struct radeon_device *rdev);
int r100_debugfs_mc_info_init(struct radeon_device *rdev);
 
#if 0
/*
* PCI GART
*/
104,7 → 105,6
WREG32(RADEON_AIC_HI_ADDR, 0);
}
 
 
int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
if (i < 0 || i > rdev->gart.num_gpu_pages) {
132,10 → 132,10
uint32_t ov0_scale_cntl, crtc_ext_cntl, crtc_gen_cntl, crtc2_gen_cntl;
 
/* FIXME: is this function correct for rs100,rs200,rs300 ? */
if (r100_gui_wait_for_idle(rdev)) {
printk(KERN_WARNING "Failed to wait GUI idle while "
"programming pipes. Bad things might happen.\n");
}
// if (r100_gui_wait_for_idle(rdev)) {
// printk(KERN_WARNING "Failed to wait GUI idle while "
// "programming pipes. Bad things might happen.\n");
// }
 
/* stop display and memory access */
ov0_scale_cntl = RREG32(RADEON_OV0_SCALE_CNTL);
168,10 → 168,10
uint32_t tmp;
int r;
 
// r = r100_debugfs_mc_info_init(rdev);
// if (r) {
// DRM_ERROR("Failed to register debugfs file for R100 MC !\n");
// }
r = r100_debugfs_mc_info_init(rdev);
if (r) {
DRM_ERROR("Failed to register debugfs file for R100 MC !\n");
}
/* Write VRAM size in case we are limiting it */
WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size);
tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1;
206,9 → 206,9
{
int r;
 
// if (r100_debugfs_rbbm_init(rdev)) {
// DRM_ERROR("Failed to register debugfs file for RBBM !\n");
// }
if (r100_debugfs_rbbm_init(rdev)) {
DRM_ERROR("Failed to register debugfs file for RBBM !\n");
}
 
r100_gpu_init(rdev);
/* Disable gart which also disable out of gart access */
245,10 → 245,11
void r100_mc_fini(struct radeon_device *rdev)
{
r100_pci_gart_disable(rdev);
// radeon_gart_table_ram_free(rdev);
// radeon_gart_fini(rdev);
radeon_gart_table_ram_free(rdev);
radeon_gart_fini(rdev);
}
 
 
/*
* Fence emission
*/
267,7 → 268,8
radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
}
 
#if 0
#endif
 
/*
* Writeback
*/
315,6 → 317,7
}
 
 
#if 0
int r100_copy_blit(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
388,7 → 391,6
return r;
}
 
#endif
 
/*
* CP
410,10 → 412,14
radeon_ring_unlock_commit(rdev);
}
 
#endif
 
static void r100_cp_load_microcode(struct radeon_device *rdev)
{
int i;
 
dbgprintf("%s\n",__FUNCTION__);
 
if (r100_gui_wait_for_idle(rdev)) {
printk(KERN_WARNING "Failed to wait GUI idle while "
"programming pipes. Bad things might happen.\n");
590,6 → 596,7
return 0;
}
 
#if 0
 
void r100_cp_fini(struct radeon_device *rdev)
{
612,6 → 619,7
}
}
 
#endif
 
int r100_cp_reset(struct radeon_device *rdev)
{
1039,7 → 1047,6
return 0;
}
 
#endif
 
/*
* Global GPU functions
1059,6 → 1066,7
}
}
 
#endif
 
 
/* Wait for vertical sync on primary CRTC */
1204,6 → 1212,8
return -1;
}
 
#if 0
 
int r100_gpu_reset(struct radeon_device *rdev)
{
uint32_t status;
1296,6 → 1306,8
rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
}
 
#endif
 
/*
* Indirect registers accessor
*/
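
r100_pci_gart_set_page() (its bounds check appears near the top of this file's hunks) writes one entry of the RAM-resident PCI GART table. The entry format is not shown in this diff; the following is a hedged sketch assuming a flat array of 32-bit entries holding the low bits of the page's bus address.

#include <stdint.h>

/* Hypothetical sketch: store page i's bus address into a RAM-based GART table.
 * Real hardware may additionally need endian conversion, flag bits, or a TLB
 * flush (cf. r100_pci_gart_tlb_flush) after the update. */
static int gart_set_page_sketch(volatile uint32_t *table, unsigned num_pages,
                                unsigned i, uint64_t addr)
{
    if (i >= num_pages)
        return -1;                     /* same kind of range check as above */
    table[i] = (uint32_t)addr;         /* low 32 bits of the bus address */
    return 0;
}
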
/drivers/video/drm/radeon/r520.c
239,7 → 239,82
rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
}
 
/*
* Global GPU functions
*/
void rs600_disable_vga(struct radeon_device *rdev)
{
unsigned tmp;
dbgprintf("%s\n",__FUNCTION__);
 
WREG32(0x330, 0);
WREG32(0x338, 0);
tmp = RREG32(0x300);
tmp &= ~(3 << 16);
WREG32(0x300, tmp);
WREG32(0x308, (1 << 8));
WREG32(0x310, rdev->mc.vram_location);
WREG32(0x594, 0);
}
 
 
void r420_pipes_init(struct radeon_device *rdev)
{
unsigned tmp;
unsigned gb_pipe_select;
unsigned num_pipes;
 
dbgprintf("%s\n",__FUNCTION__);
 
/* GA_ENHANCE workaround TCL deadlock issue */
WREG32(0x4274, (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3));
/* get max number of pipes */
gb_pipe_select = RREG32(0x402C);
num_pipes = ((gb_pipe_select >> 12) & 3) + 1;
rdev->num_gb_pipes = num_pipes;
tmp = 0;
switch (num_pipes) {
default:
/* force to 1 pipe */
num_pipes = 1;
case 1:
tmp = (0 << 1);
break;
case 2:
tmp = (3 << 1);
break;
case 3:
tmp = (6 << 1);
break;
case 4:
tmp = (7 << 1);
break;
}
WREG32(0x42C8, (1 << num_pipes) - 1);
/* Sub pixel 1/12 so we can have 4K rendering according to doc */
tmp |= (1 << 4) | (1 << 0);
WREG32(0x4018, tmp);
if (r100_gui_wait_for_idle(rdev)) {
printk(KERN_WARNING "Failed to wait GUI idle while "
"programming pipes. Bad things might happen.\n");
}
 
tmp = RREG32(0x170C);
WREG32(0x170C, tmp | (1 << 31));
 
WREG32(R300_RB2D_DSTCACHE_MODE,
RREG32(R300_RB2D_DSTCACHE_MODE) |
R300_DC_AUTOFLUSH_ENABLE |
R300_DC_DC_DISABLE_IGNORE_PE);
 
if (r100_gui_wait_for_idle(rdev)) {
printk(KERN_WARNING "Failed to wait GUI idle while "
"programming pipes. Bad things might happen.\n");
}
DRM_INFO("radeon: %d pipes initialized.\n", rdev->num_gb_pipes);
}
 
 
int radeon_agp_init(struct radeon_device *rdev)
{
 
359,8 → 434,38
}
 
 
void rs600_mc_disable_clients(struct radeon_device *rdev)
{
unsigned tmp;
dbgprintf("%s\n",__FUNCTION__);
 
if (r100_gui_wait_for_idle(rdev)) {
printk(KERN_WARNING "Failed to wait GUI idle while "
"programming pipes. Bad things might happen.\n");
}
 
tmp = RREG32(AVIVO_D1VGA_CONTROL);
WREG32(AVIVO_D1VGA_CONTROL, tmp & ~AVIVO_DVGA_CONTROL_MODE_ENABLE);
tmp = RREG32(AVIVO_D2VGA_CONTROL);
WREG32(AVIVO_D2VGA_CONTROL, tmp & ~AVIVO_DVGA_CONTROL_MODE_ENABLE);
 
tmp = RREG32(AVIVO_D1CRTC_CONTROL);
WREG32(AVIVO_D1CRTC_CONTROL, tmp & ~AVIVO_CRTC_EN);
tmp = RREG32(AVIVO_D2CRTC_CONTROL);
WREG32(AVIVO_D2CRTC_CONTROL, tmp & ~AVIVO_CRTC_EN);
 
/* make sure all previous writes got through */
tmp = RREG32(AVIVO_D2CRTC_CONTROL);
 
mdelay(1);
 
dbgprintf("done\n");
 
}
 
 
 
 
void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev);
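
r420_pipes_init() above derives the pipe count from bits 13:12 of the register at 0x402C (GB_PIPE_SELECT). A tiny worked sketch of that decode, with an example register value chosen purely for illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Example value only: bits 13:12 = 2, so the chip reports 3 pipes. */
    uint32_t gb_pipe_select = 0x00002000;
    unsigned num_pipes = ((gb_pipe_select >> 12) & 3) + 1;

    printf("%u pipes\n", num_pipes);   /* prints "3 pipes" */
    return 0;
}
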
 
 
/drivers/video/drm/radeon/radeon_asic.h
61,18 → 61,20
void r100_ring_start(struct radeon_device *rdev);
int r100_irq_set(struct radeon_device *rdev);
int r100_irq_process(struct radeon_device *rdev);
void r100_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
int r100_cs_parse(struct radeon_cs_parser *p);
//void r100_fence_ring_emit(struct radeon_device *rdev,
// struct radeon_fence *fence);
//int r100_cs_parse(struct radeon_cs_parser *p);
void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg);
int r100_copy_blit(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_pages,
struct radeon_fence *fence);
//int r100_copy_blit(struct radeon_device *rdev,
// uint64_t src_offset,
// uint64_t dst_offset,
// unsigned num_pages,
// struct radeon_fence *fence);
 
 
#if 0
 
static struct radeon_asic r100_asic = {
.init = &r100_init,
.errata = &r100_errata,
80,27 → 82,27
.gpu_reset = &r100_gpu_reset,
.mc_init = &r100_mc_init,
.mc_fini = &r100_mc_fini,
// .wb_init = &r100_wb_init,
// .wb_fini = &r100_wb_fini,
.wb_init = &r100_wb_init,
.wb_fini = &r100_wb_fini,
.gart_enable = &r100_gart_enable,
.gart_disable = &r100_pci_gart_disable,
.gart_tlb_flush = &r100_pci_gart_tlb_flush,
.gart_set_page = &r100_pci_gart_set_page,
.cp_init = &r100_cp_init,
// .cp_fini = &r100_cp_fini,
// .cp_disable = &r100_cp_disable,
.cp_fini = &r100_cp_fini,
.cp_disable = &r100_cp_disable,
.ring_start = &r100_ring_start,
// .irq_set = &r100_irq_set,
// .irq_process = &r100_irq_process,
.irq_set = &r100_irq_set,
.irq_process = &r100_irq_process,
// .fence_ring_emit = &r100_fence_ring_emit,
// .cs_parse = &r100_cs_parse,
// .copy_blit = &r100_copy_blit,
// .copy_dma = NULL,
// .copy = &r100_copy_blit,
// .set_engine_clock = &radeon_legacy_set_engine_clock,
// .set_memory_clock = NULL,
// .set_pcie_lanes = NULL,
// .set_clock_gating = &radeon_legacy_set_clock_gating,
.set_engine_clock = &radeon_legacy_set_engine_clock,
.set_memory_clock = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_legacy_set_clock_gating,
};
 
 
114,9 → 116,9
int r300_mc_init(struct radeon_device *rdev);
void r300_mc_fini(struct radeon_device *rdev);
void r300_ring_start(struct radeon_device *rdev);
void r300_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
int r300_cs_parse(struct radeon_cs_parser *p);
//void r300_fence_ring_emit(struct radeon_device *rdev,
// struct radeon_fence *fence);
//int r300_cs_parse(struct radeon_cs_parser *p);
int r300_gart_enable(struct radeon_device *rdev);
void rv370_pcie_gart_disable(struct radeon_device *rdev);
void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev);
124,11 → 126,11
uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg);
void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
int r300_copy_dma(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_pages,
struct radeon_fence *fence);
//int r300_copy_dma(struct radeon_device *rdev,
// uint64_t src_offset,
// uint64_t dst_offset,
// unsigned num_pages,
// struct radeon_fence *fence);
 
 
static struct radeon_asic r300_asic = {
138,30 → 140,29
.gpu_reset = &r300_gpu_reset,
.mc_init = &r300_mc_init,
.mc_fini = &r300_mc_fini,
// .wb_init = &r100_wb_init,
// .wb_fini = &r100_wb_fini,
.wb_init = &r100_wb_init,
.wb_fini = &r100_wb_fini,
.gart_enable = &r300_gart_enable,
.gart_disable = &r100_pci_gart_disable,
.gart_tlb_flush = &r100_pci_gart_tlb_flush,
.gart_set_page = &r100_pci_gart_set_page,
.cp_init = &r100_cp_init,
// .cp_fini = &r100_cp_fini,
// .cp_disable = &r100_cp_disable,
.cp_fini = &r100_cp_fini,
.cp_disable = &r100_cp_disable,
.ring_start = &r300_ring_start,
// .irq_set = &r100_irq_set,
// .irq_process = &r100_irq_process,
.irq_set = &r100_irq_set,
.irq_process = &r100_irq_process,
// .fence_ring_emit = &r300_fence_ring_emit,
// .cs_parse = &r300_cs_parse,
// .copy_blit = &r100_copy_blit,
// .copy_dma = &r300_copy_dma,
// .copy = &r100_copy_blit,
// .set_engine_clock = &radeon_legacy_set_engine_clock,
// .set_memory_clock = NULL,
// .set_pcie_lanes = &rv370_set_pcie_lanes,
// .set_clock_gating = &radeon_legacy_set_clock_gating,
.set_engine_clock = &radeon_legacy_set_engine_clock,
.set_memory_clock = NULL,
.set_pcie_lanes = &rv370_set_pcie_lanes,
.set_clock_gating = &radeon_legacy_set_clock_gating,
};
 
 
/*
* r420,r423,rv410
*/
176,27 → 177,27
.gpu_reset = &r300_gpu_reset,
.mc_init = &r420_mc_init,
.mc_fini = &r420_mc_fini,
// .wb_init = &r100_wb_init,
// .wb_fini = &r100_wb_fini,
.wb_init = &r100_wb_init,
.wb_fini = &r100_wb_fini,
.gart_enable = &r300_gart_enable,
.gart_disable = &rv370_pcie_gart_disable,
.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
.gart_set_page = &rv370_pcie_gart_set_page,
.cp_init = &r100_cp_init,
// .cp_fini = &r100_cp_fini,
// .cp_disable = &r100_cp_disable,
.cp_fini = &r100_cp_fini,
.cp_disable = &r100_cp_disable,
.ring_start = &r300_ring_start,
// .irq_set = &r100_irq_set,
// .irq_process = &r100_irq_process,
.irq_set = &r100_irq_set,
.irq_process = &r100_irq_process,
// .fence_ring_emit = &r300_fence_ring_emit,
// .cs_parse = &r300_cs_parse,
// .copy_blit = &r100_copy_blit,
// .copy_dma = &r300_copy_dma,
// .copy = &r100_copy_blit,
// .set_engine_clock = &radeon_atom_set_engine_clock,
// .set_memory_clock = &radeon_atom_set_memory_clock,
// .set_pcie_lanes = &rv370_set_pcie_lanes,
// .set_clock_gating = &radeon_atom_set_clock_gating,
.set_engine_clock = &radeon_atom_set_engine_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.set_pcie_lanes = &rv370_set_pcie_lanes,
.set_clock_gating = &radeon_atom_set_clock_gating,
};
 
 
220,27 → 221,27
.gpu_reset = &r300_gpu_reset,
.mc_init = &rs400_mc_init,
.mc_fini = &rs400_mc_fini,
// .wb_init = &r100_wb_init,
// .wb_fini = &r100_wb_fini,
.wb_init = &r100_wb_init,
.wb_fini = &r100_wb_fini,
.gart_enable = &rs400_gart_enable,
.gart_disable = &rs400_gart_disable,
.gart_tlb_flush = &rs400_gart_tlb_flush,
.gart_set_page = &rs400_gart_set_page,
.cp_init = &r100_cp_init,
// .cp_fini = &r100_cp_fini,
// .cp_disable = &r100_cp_disable,
.cp_fini = &r100_cp_fini,
.cp_disable = &r100_cp_disable,
.ring_start = &r300_ring_start,
// .irq_set = &r100_irq_set,
// .irq_process = &r100_irq_process,
.irq_set = &r100_irq_set,
.irq_process = &r100_irq_process,
// .fence_ring_emit = &r300_fence_ring_emit,
// .cs_parse = &r300_cs_parse,
// .copy_blit = &r100_copy_blit,
// .copy_dma = &r300_copy_dma,
// .copy = &r100_copy_blit,
// .set_engine_clock = &radeon_legacy_set_engine_clock,
// .set_memory_clock = NULL,
// .set_pcie_lanes = NULL,
// .set_clock_gating = &radeon_legacy_set_clock_gating,
.set_engine_clock = &radeon_legacy_set_engine_clock,
.set_memory_clock = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_legacy_set_clock_gating,
};
 
 
258,7 → 259,6
int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
 
static struct radeon_asic rs600_asic = {
.init = &r300_init,
.errata = &rs600_errata,
266,27 → 266,27
.gpu_reset = &r300_gpu_reset,
.mc_init = &rs600_mc_init,
.mc_fini = &rs600_mc_fini,
// .wb_init = &r100_wb_init,
// .wb_fini = &r100_wb_fini,
.wb_init = &r100_wb_init,
.wb_fini = &r100_wb_fini,
.gart_enable = &rs600_gart_enable,
.gart_disable = &rs600_gart_disable,
.gart_tlb_flush = &rs600_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
.cp_init = &r100_cp_init,
// .cp_fini = &r100_cp_fini,
// .cp_disable = &r100_cp_disable,
.cp_fini = &r100_cp_fini,
.cp_disable = &r100_cp_disable,
.ring_start = &r300_ring_start,
// .irq_set = &rs600_irq_set,
// .irq_process = &r100_irq_process,
.irq_set = &rs600_irq_set,
.irq_process = &r100_irq_process,
// .fence_ring_emit = &r300_fence_ring_emit,
// .cs_parse = &r300_cs_parse,
// .copy_blit = &r100_copy_blit,
// .copy_dma = &r300_copy_dma,
// .copy = &r100_copy_blit,
// .set_engine_clock = &radeon_atom_set_engine_clock,
// .set_memory_clock = &radeon_atom_set_memory_clock,
// .set_pcie_lanes = NULL,
// .set_clock_gating = &radeon_atom_set_clock_gating,
.set_engine_clock = &radeon_atom_set_engine_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_atom_set_clock_gating,
};
 
 
306,29 → 306,30
.gpu_reset = &r300_gpu_reset,
.mc_init = &rs690_mc_init,
.mc_fini = &rs690_mc_fini,
// .wb_init = &r100_wb_init,
// .wb_fini = &r100_wb_fini,
.wb_init = &r100_wb_init,
.wb_fini = &r100_wb_fini,
.gart_enable = &rs400_gart_enable,
.gart_disable = &rs400_gart_disable,
.gart_tlb_flush = &rs400_gart_tlb_flush,
.gart_set_page = &rs400_gart_set_page,
.cp_init = &r100_cp_init,
// .cp_fini = &r100_cp_fini,
// .cp_disable = &r100_cp_disable,
.cp_fini = &r100_cp_fini,
.cp_disable = &r100_cp_disable,
.ring_start = &r300_ring_start,
// .irq_set = &rs600_irq_set,
// .irq_process = &r100_irq_process,
.irq_set = &rs600_irq_set,
.irq_process = &r100_irq_process,
// .fence_ring_emit = &r300_fence_ring_emit,
// .cs_parse = &r300_cs_parse,
// .copy_blit = &r100_copy_blit,
// .copy_dma = &r300_copy_dma,
// .copy = &r300_copy_dma,
// .set_engine_clock = &radeon_atom_set_engine_clock,
// .set_memory_clock = &radeon_atom_set_memory_clock,
// .set_pcie_lanes = NULL,
// .set_clock_gating = &radeon_atom_set_clock_gating,
.set_engine_clock = &radeon_atom_set_engine_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_atom_set_clock_gating,
};
 
#endif
/*
* rv515
*/
344,7 → 345,7
uint32_t rv515_pcie_rreg(struct radeon_device *rdev, uint32_t reg);
void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
 
 
/*
static struct radeon_asic rv515_asic = {
.init = &rv515_init,
.errata = &rv515_errata,
352,30 → 353,41
.gpu_reset = &rv515_gpu_reset,
.mc_init = &rv515_mc_init,
.mc_fini = &rv515_mc_fini,
// .wb_init = &r100_wb_init,
// .wb_fini = &r100_wb_fini,
.wb_init = &r100_wb_init,
.wb_fini = &r100_wb_fini,
.gart_enable = &r300_gart_enable,
.gart_disable = &rv370_pcie_gart_disable,
.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
.gart_set_page = &rv370_pcie_gart_set_page,
.cp_init = &r100_cp_init,
// .cp_fini = &r100_cp_fini,
// .cp_disable = &r100_cp_disable,
.cp_fini = &r100_cp_fini,
.cp_disable = &r100_cp_disable,
.ring_start = &rv515_ring_start,
// .irq_set = &r100_irq_set,
// .irq_process = &r100_irq_process,
.irq_set = &r100_irq_set,
.irq_process = &r100_irq_process,
// .fence_ring_emit = &r300_fence_ring_emit,
// .cs_parse = &r300_cs_parse,
// .copy_blit = &r100_copy_blit,
// .copy_dma = &r300_copy_dma,
// .copy = &r100_copy_blit,
// .set_engine_clock = &radeon_atom_set_engine_clock,
// .set_memory_clock = &radeon_atom_set_memory_clock,
// .set_pcie_lanes = &rv370_set_pcie_lanes,
// .set_clock_gating = &radeon_atom_set_clock_gating,
.set_engine_clock = &radeon_atom_set_engine_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.set_pcie_lanes = &rv370_set_pcie_lanes,
.set_clock_gating = &radeon_atom_set_clock_gating,
};
 
*/
 
 
int r300_gart_enable(struct radeon_device *rdev);
void rv370_pcie_gart_disable(struct radeon_device *rdev);
void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev);
int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg);
void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
 
 
/*
* r520,rv530,rv560,rv570,r580
*/
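
The struct radeon_asic tables in this header are per-chip vtables: each family fills in its own implementations, and the pointers are selected by the family switch shown in radeon_device.c above, so the rest of the driver stays chip-agnostic. A minimal sketch of the dispatch pattern; the struct and wrapper names below are illustrative, not the driver's.

#include <stdint.h>

struct radeon_device;   /* opaque here */

/* Illustrative only: a trimmed-down vtable in the style of struct radeon_asic. */
struct asic_ops_sketch {
    int  (*gart_enable)(struct radeon_device *rdev);
    void (*gart_tlb_flush)(struct radeon_device *rdev);
    int  (*gart_set_page)(struct radeon_device *rdev, int i, uint64_t addr);
};

/* Core code always calls through the table, never a chip function directly. */
static inline int asic_gart_set_page(struct radeon_device *rdev,
                                     const struct asic_ops_sketch *ops,
                                     int i, uint64_t addr)
{
    return ops->gart_set_page(rdev, i, addr);
}
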
/drivers/video/drm/radeon/radeon_atombios.c
447,7 → 447,7
struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
 
atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
 
ENTRY();
supported_devices =
(union atom_supported_devices *)(ctx->bios + data_offset);
 
596,7 → 596,7
}
 
radeon_link_encoder_connector(dev);
 
LEAVE();
return true;
}
 
944,6 → 944,8
struct radeon_device *rdev = dev->dev_private;
uint32_t bios_2_scratch, bios_6_scratch;
 
dbgprintf("%s\n",__FUNCTION__);
 
if (rdev->family >= CHIP_R600) {
bios_2_scratch = RREG32(R600_BIOS_0_SCRATCH);
bios_6_scratch = RREG32(R600_BIOS_6_SCRATCH);
/drivers/video/drm/radeon/radeon_gart.c
30,6 → 30,7
#include "radeon.h"
#include "radeon_reg.h"
 
#if 0
/*
* Common GART table functions.
*/
37,8 → 38,8
{
void *ptr;
 
// ptr = pci_alloc_consistent(rdev->pdev, rdev->gart.table_size,
// &rdev->gart.table_addr);
ptr = pci_alloc_consistent(rdev->pdev, rdev->gart.table_size,
&rdev->gart.table_addr);
if (ptr == NULL) {
return -ENOMEM;
}
66,12 → 67,13
rdev->gart.table_size >> PAGE_SHIFT);
}
#endif
// pci_free_consistent(rdev->pdev, rdev->gart.table_size,
// (void *)rdev->gart.table.ram.ptr,
// rdev->gart.table_addr);
pci_free_consistent(rdev->pdev, rdev->gart.table_size,
(void *)rdev->gart.table.ram.ptr,
rdev->gart.table_addr);
rdev->gart.table.ram.ptr = NULL;
rdev->gart.table_addr = 0;
}
#endif
 
int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
{
/drivers/video/drm/radeon/rv515.c
52,6 → 52,7
void rv515_gpu_init(struct radeon_device *rdev);
int rv515_mc_wait_for_idle(struct radeon_device *rdev);
 
#if 0
/*
* MC
*/
60,15 → 61,15
uint32_t tmp;
int r;
 
// if (r100_debugfs_rbbm_init(rdev)) {
// DRM_ERROR("Failed to register debugfs file for RBBM !\n");
// }
// if (rv515_debugfs_pipes_info_init(rdev)) {
// DRM_ERROR("Failed to register debugfs file for pipes !\n");
// }
// if (rv515_debugfs_ga_info_init(rdev)) {
// DRM_ERROR("Failed to register debugfs file for pipes !\n");
// }
if (r100_debugfs_rbbm_init(rdev)) {
DRM_ERROR("Failed to register debugfs file for RBBM !\n");
}
if (rv515_debugfs_pipes_info_init(rdev)) {
DRM_ERROR("Failed to register debugfs file for pipes !\n");
}
if (rv515_debugfs_ga_info_init(rdev)) {
DRM_ERROR("Failed to register debugfs file for pipes !\n");
}
 
rv515_gpu_init(rdev);
rv370_pcie_gart_disable(rdev);
129,6 → 130,7
radeon_gart_fini(rdev);
}
 
#endif
 
/*
* Global GPU functions
254,6 → 256,7
return -1;
}
 
#if 0
void rv515_gpu_init(struct radeon_device *rdev)
{
unsigned pipe_select_current, gb_pipe_select, tmp;
285,6 → 288,7
}
}
 
#endif
 
int rv515_ga_reset(struct radeon_device *rdev)
{
/drivers/video/drm/radeon/radeon_clocks.c
94,8 → 94,8
 
if (rdev->is_atom_bios)
ret = radeon_atom_get_clock_info(dev);
else
ret = radeon_combios_get_clock_info(dev);
// else
// ret = radeon_combios_get_clock_info(dev);
 
if (ret) {
if (p1pll->reference_div < 2)