Subversion Repositories Kolibri OS

Compare Revisions

Rev 1120 → Rev 1119

/drivers/video/drm/include/list.h
File deleted
/drivers/video/drm/include/drm_mm.h
File deleted
/drivers/video/drm/radeon/radeon_gart.c
File deleted
/drivers/video/drm/radeon/r300.c
File deleted
/drivers/video/drm/radeon/radeon_object.c
File deleted
/drivers/video/drm/radeon/atom.c
1165,7 → 1165,7
int atom_asic_init(struct atom_context *ctx)
{
 
dbgprintf("%s\n",__FUNCTION__);
dbgprintf("%s\n\r",__FUNCTION__);
 
int hwi = CU16(ctx->data_table + ATOM_DATA_FWI_PTR);
uint32_t ps[16];
/drivers/video/drm/radeon/pci.c
3,7 → 3,7
#include <errno-base.h>
#include <syscall.h>
 
static LIST_HEAD(devices);
link_t devices;
 
static dev_t* pci_scan_device(u32_t bus, int devfn);
 
346,7 → 346,7
 
dev = (dev_t*)malloc(sizeof(dev_t));
 
INIT_LIST_HEAD(&dev->link);
link_initialize(&dev->link);
 
if(unlikely(dev == NULL))
return NULL;
375,7 → 375,7
dev = pci_scan_device(bus, devfn);
if( dev )
{
list_add(&dev->link, &devices);
list_append(&dev->link, &devices);
 
nr++;
 
420,7 → 420,7
u32_t last_bus;
u32_t bus = 0 , devfn = 0;
 
// list_initialize(&devices);
list_initialize(&devices);
 
last_bus = PciApi(1);
 
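The pci.c hunks above swap Linux-style list primitives for KolibriOS-native ones. A rough correspondence, as a sketch assuming HelenOS-style semantics for link_t (which KolibriOS's headers follow):

/*
 *   static LIST_HEAD(devices);        ->  link_t devices;
 *                                         list_initialize(&devices);
 *   INIT_LIST_HEAD(&dev->link);       ->  link_initialize(&dev->link);
 *   list_add(&dev->link, &devices);   ->  list_append(&dev->link, &devices);
 *
 * list_add() inserts at the head of the list, list_append() at the tail, so
 * enumeration order flips with this change. In both revisions the link field
 * is initialized before the malloc() result is NULL-checked; the check
 * should come first.
 */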
/drivers/video/drm/radeon/r100.c
268,7 → 268,6
radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
}
 
#endif
 
/*
* Writeback
308,16 → 307,14
void r100_wb_fini(struct radeon_device *rdev)
{
if (rdev->wb.wb_obj) {
// radeon_object_kunmap(rdev->wb.wb_obj);
// radeon_object_unpin(rdev->wb.wb_obj);
// radeon_object_unref(&rdev->wb.wb_obj);
radeon_object_kunmap(rdev->wb.wb_obj);
radeon_object_unpin(rdev->wb.wb_obj);
radeon_object_unref(&rdev->wb.wb_obj);
rdev->wb.wb = NULL;
rdev->wb.wb_obj = NULL;
}
}
 
 
#if 0
int r100_copy_blit(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
418,7 → 415,7
{
int i;
 
dbgprintf("%s\n",__FUNCTION__);
dbgprintf("%s\n\r",__FUNCTION__);
 
if (r100_gui_wait_for_idle(rdev)) {
printk(KERN_WARNING "Failed to wait GUI idle while "
501,7 → 498,7
uint32_t tmp;
int r;
 
dbgprintf("%s\n",__FUNCTION__);
dbgprintf("%s\n\r",__FUNCTION__);
 
// if (r100_debugfs_cp_init(rdev)) {
// DRM_ERROR("Failed to register debugfs file for CP !\n");
627,7 → 624,7
bool reinit_cp;
int i;
 
dbgprintf("%s\n",__FUNCTION__);
dbgprintf("%s\n\r",__FUNCTION__);
 
 
reinit_cp = rdev->cp.ready;
1173,7 → 1170,7
{
uint32_t tmp;
 
dbgprintf("%s\n",__FUNCTION__);
dbgprintf("%s\n\r",__FUNCTION__);
 
tmp = RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL;
tmp |= (7 << 28);
1190,7 → 1187,7
uint32_t tmp;
int i;
 
dbgprintf("%s\n",__FUNCTION__);
dbgprintf("%s\n\r",__FUNCTION__);
 
WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_E2);
(void)RREG32(RADEON_RBBM_SOFT_RESET);
/drivers/video/drm/radeon/r520.c
155,7 → 155,7
void r520_gpu_init(struct radeon_device *rdev)
{
unsigned pipe_select_current, gb_pipe_select, tmp;
dbgprintf("%s\n",__FUNCTION__);
dbgprintf("%s\n\r",__FUNCTION__);
 
r100_hdp_reset(rdev);
rs600_disable_vga(rdev);
204,7 → 204,7
static void r520_vram_get_type(struct radeon_device *rdev)
{
uint32_t tmp;
dbgprintf("%s\n",__FUNCTION__);
dbgprintf("%s\n\r",__FUNCTION__);
 
rdev->mc.vram_width = 128;
rdev->mc.vram_is_ddr = true;
245,7 → 245,7
void rs600_disable_vga(struct radeon_device *rdev)
{
unsigned tmp;
dbgprintf("%s\n",__FUNCTION__);
dbgprintf("%s\n\r",__FUNCTION__);
 
WREG32(0x330, 0);
WREG32(0x338, 0);
264,7 → 264,7
unsigned gb_pipe_select;
unsigned num_pipes;
 
dbgprintf("%s\n",__FUNCTION__);
dbgprintf("%s\n\r",__FUNCTION__);
 
/* GA_ENHANCE workaround TCL deadlock issue */
WREG32(0x4274, (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3));
314,11 → 314,83
DRM_INFO("radeon: %d pipes initialized.\n", rdev->num_gb_pipes);
}
 
void rv370_pcie_gart_disable(struct radeon_device *rdev)
{
uint32_t tmp;
dbgprintf("%s\n\r",__FUNCTION__);
 
tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN);
if (rdev->gart.table.vram.robj) {
// radeon_object_kunmap(rdev->gart.table.vram.robj);
// radeon_object_unpin(rdev->gart.table.vram.robj);
}
}
 
void radeon_gart_table_vram_free(struct radeon_device *rdev)
{
if (rdev->gart.table.vram.robj == NULL) {
return;
}
// radeon_object_kunmap(rdev->gart.table.vram.robj);
// radeon_object_unpin(rdev->gart.table.vram.robj);
// radeon_object_unref(&rdev->gart.table.vram.robj);
}
 
/*
* Common gart functions.
*/
void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
int pages)
{
unsigned t;
unsigned p;
int i, j;
dbgprintf("%s\n\r",__FUNCTION__);
 
if (!rdev->gart.ready) {
dbgprintf("trying to unbind memory to unitialized GART !\n");
return;
}
t = offset / 4096;
p = t / (PAGE_SIZE / 4096);
for (i = 0; i < pages; i++, p++) {
if (rdev->gart.pages[p]) {
// pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p],
// PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
rdev->gart.pages[p] = NULL;
rdev->gart.pages_addr[p] = 0;
for (j = 0; j < (PAGE_SIZE / 4096); j++, t++) {
radeon_gart_set_page(rdev, t, 0);
}
}
}
mb();
radeon_gart_tlb_flush(rdev);
}
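/*
 * A worked example of the index arithmetic above (a sketch; numbers are
 * illustrative). The GART always uses 4096-byte GPU pages, while the CPU
 * page size may be larger, so t indexes GART entries and p indexes backing
 * CPU pages. With PAGE_SIZE == 16384 and offset == 0x8000:
 *
 *   t = 0x8000 / 4096        = 8    first GART entry touched
 *   p = 8 / (16384 / 4096)   = 2    CPU-page slot
 *
 * and the inner loop clears PAGE_SIZE / 4096 == 4 consecutive entries per
 * CPU page. On this x86 target PAGE_SIZE is presumably 4096, so t == p and
 * the inner loop runs once.
 */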
 
 
 
void radeon_gart_fini(struct radeon_device *rdev)
{
if (rdev->gart.pages && rdev->gart.pages_addr && rdev->gart.ready) {
/* unbind pages */
radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages);
}
rdev->gart.ready = false;
// kfree(rdev->gart.pages);
// kfree(rdev->gart.pages_addr);
rdev->gart.pages = NULL;
rdev->gart.pages_addr = NULL;
}
 
 
 
int radeon_agp_init(struct radeon_device *rdev)
{
 
dbgprintf("%s\n",__FUNCTION__);
dbgprintf("%s\n\r",__FUNCTION__);
 
#if __OS_HAS_AGP
struct radeon_agpmode_quirk *p = radeon_agpmode_quirk_list;
463,14 → 535,185
 
}
 
int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
 
if (i < 0 || i > rdev->gart.num_gpu_pages) {
return -EINVAL;
}
addr = (((u32_t)addr) >> 8) | ((upper_32_bits(addr) & 0xff) << 4) | 0xC;
writel(cpu_to_le32(addr), ((void __iomem *)ptr) + (i * 4));
return 0;
}
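/*
 * Sketch of the PTE encoding above with an illustrative address (flag-bit
 * meanings are assumed, not taken from this source). Entries are 32-bit and
 * the page address is 4 KiB aligned, so the bits shifted in from the low
 * byte are zero and OR-ing in the 0xC flag bits is safe:
 *
 *   addr              = 0x12345000
 *   (u32_t)addr >> 8  = 0x00123450
 *   | 0xC             = 0x0012345C   written little-endian at ptr + i*4
 *
 * On a 32-bit host upper_32_bits(addr) is 0, so the
 * (upper_32_bits(addr) & 0xff) << 4 term contributes nothing. Note the
 * bounds check accepts i == num_gpu_pages; a stricter check would use >=.
 */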
 
 
int radeon_gart_init(struct radeon_device *rdev)
{
 
dbgprintf("%s\n",__FUNCTION__);
 
if (rdev->gart.pages) {
return 0;
}
/* We need PAGE_SIZE >= 4096 */
if (PAGE_SIZE < 4096) {
DRM_ERROR("Page size is smaller than GPU page size!\n");
return -EINVAL;
}
/* Compute table size */
rdev->gart.num_cpu_pages = rdev->mc.gtt_size / PAGE_SIZE;
rdev->gart.num_gpu_pages = rdev->mc.gtt_size / 4096;
DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
rdev->gart.num_cpu_pages, rdev->gart.num_gpu_pages);
/* Allocate pages table */
rdev->gart.pages = kzalloc(sizeof(void *) * rdev->gart.num_cpu_pages,
GFP_KERNEL);
if (rdev->gart.pages == NULL) {
// radeon_gart_fini(rdev);
return -ENOMEM;
}
rdev->gart.pages_addr = kzalloc(sizeof(u32_t) *
rdev->gart.num_cpu_pages, GFP_KERNEL);
if (rdev->gart.pages_addr == NULL) {
// radeon_gart_fini(rdev);
return -ENOMEM;
}
return 0;
}
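/*
 * Table sizing with illustrative numbers (a sketch): for a 32 MiB GTT and
 * PAGE_SIZE == 4096,
 *
 *   num_cpu_pages = 32 MiB / PAGE_SIZE = 8192
 *   num_gpu_pages = 32 MiB / 4096      = 8192
 *
 * and at 4 bytes per entry (table_size = num_gpu_pages * 4, set in
 * rv370_pcie_gart_enable() below) the page table itself occupies 32 KiB.
 */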
 
int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
{
uint32_t gpu_addr;
int r;
 
// if (rdev->gart.table.vram.robj == NULL) {
// r = radeon_object_create(rdev, NULL,
// rdev->gart.table_size,
// true,
// RADEON_GEM_DOMAIN_VRAM,
// false, &rdev->gart.table.vram.robj);
// if (r) {
// return r;
// }
// }
// r = radeon_object_pin(rdev->gart.table.vram.robj,
// RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
// if (r) {
// radeon_object_unref(&rdev->gart.table.vram.robj);
// return r;
// }
// r = radeon_object_kmap(rdev->gart.table.vram.robj,
// (void **)&rdev->gart.table.vram.ptr);
// if (r) {
// radeon_object_unpin(rdev->gart.table.vram.robj);
// radeon_object_unref(&rdev->gart.table.vram.robj);
// DRM_ERROR("radeon: failed to map gart vram table.\n");
// return r;
// }
 
gpu_addr = 0x800000;
 
u32_t pci_addr = rdev->mc.aper_base + gpu_addr;
 
rdev->gart.table.vram.ptr = (void*)MapIoMem(pci_addr, rdev->gart.table_size, PG_SW);
 
rdev->gart.table_addr = gpu_addr;
 
dbgprintf("alloc gart vram:\n gpu_base %x pci_base %x lin_addr %x",
gpu_addr, pci_addr, rdev->gart.table.vram.ptr);
 
return 0;
}
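/*
 * The replacement above bypasses the stubbed-out object allocator: the
 * table lives at a fixed VRAM offset and is made CPU-visible by mapping the
 * matching window of the PCI aperture:
 *
 *   gpu_addr = 0x800000;                        8 MiB into VRAM
 *   pci_addr = rdev->mc.aper_base + gpu_addr;   physical bus address
 *   ptr      = MapIoMem(pci_addr, rdev->gart.table_size, PG_SW);
 *
 * The fixed offset is assumed to be free of other allocations; nothing here
 * reserves it.
 */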
 
void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev);
 
int rv370_pcie_gart_enable(struct radeon_device *rdev)
{
uint32_t table_addr;
uint32_t tmp;
int r;
 
dbgprintf("%s\n",__FUNCTION__);
 
/* Initialize common gart structure */
r = radeon_gart_init(rdev);
if (r) {
return r;
}
// r = rv370_debugfs_pcie_gart_info_init(rdev);
// if (r) {
// DRM_ERROR("Failed to register debugfs file for PCIE gart !\n");
// }
rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
r = radeon_gart_table_vram_alloc(rdev);
if (r) {
return r;
}
/* discard memory request outside of configured range */
tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_location);
tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 4096;
WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, tmp);
WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
table_addr = rdev->gart.table_addr;
WREG32_PCIE(RADEON_PCIE_TX_GART_BASE, table_addr);
/* FIXME: setup default page */
WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_location);
WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_HI, 0);
/* Clear error */
WREG32_PCIE(0x18, 0);
tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
tmp |= RADEON_PCIE_TX_GART_EN;
tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
rv370_pcie_gart_tlb_flush(rdev);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%08X).\n",
rdev->mc.gtt_size >> 20, table_addr);
rdev->gart.ready = true;
return 0;
}
 
void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
uint32_t tmp;
int i;
 
/* Workaround for HW bug: flush twice */
for (i = 0; i < 2; i++) {
tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp | RADEON_PCIE_TX_GART_INVALIDATE_TLB);
(void)RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
mb();
}
}
 
int r300_gart_enable(struct radeon_device *rdev)
{
#if __OS_HAS_AGP
if (rdev->flags & RADEON_IS_AGP) {
if (rdev->family > CHIP_RV350) {
rv370_pcie_gart_disable(rdev);
} else {
r100_pci_gart_disable(rdev);
}
return 0;
}
#endif
if (rdev->flags & RADEON_IS_PCIE) {
rdev->asic->gart_disable = &rv370_pcie_gart_disable;
rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
return rv370_pcie_gart_enable(rdev);
}
// return r100_pci_gart_enable(rdev);
}
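/*
 * Dispatch sketch: on PCIE parts the rdev->asic callbacks are re-pointed at
 * the rv370 routines, so the generic helpers used elsewhere resolve to them,
 * e.g. (assuming the usual radeon indirection):
 *
 *   static inline void radeon_gart_tlb_flush(struct radeon_device *rdev)
 *   {
 *       rdev->asic->gart_tlb_flush(rdev);    -> rv370_pcie_gart_tlb_flush()
 *   }
 *
 * With the r100 fallback commented out, a part that is neither AGP nor PCIE
 * falls off the end of this function without returning a value.
 */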
 
 
 
int radeon_fence_driver_init(struct radeon_device *rdev)
{
unsigned long irq_flags;
498,7 → 741,49
}
 
 
int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
int pages, u32_t *pagelist)
{
unsigned t;
unsigned p;
uint64_t page_base;
int i, j;
 
dbgprintf("%s\n\r",__FUNCTION__);
 
 
if (!rdev->gart.ready) {
DRM_ERROR("trying to bind memory to unitialized GART !\n");
return -EINVAL;
}
t = offset / 4096;
p = t / (PAGE_SIZE / 4096);
 
for (i = 0; i < pages; i++, p++) {
/* we need to support large memory configurations */
/* assume that unbind has already been called on the range */
 
rdev->gart.pages_addr[p] = pagelist[i] & ~4095;
 
//if (pci_dma_mapping_error(rdev->pdev, rdev->gart.pages_addr[p])) {
// /* FIXME: failed to map page (return -ENOMEM?) */
// radeon_gart_unbind(rdev, offset, pages);
// return -ENOMEM;
//}
rdev->gart.pages[p] = pagelist[i];
page_base = (uint32_t)rdev->gart.pages_addr[p];
for (j = 0; j < (PAGE_SIZE / 4096); j++, t++) {
radeon_gart_set_page(rdev, t, page_base);
page_base += 4096;
}
}
mb();
radeon_gart_tlb_flush(rdev);
 
dbgprintf("done %s\n",__FUNCTION__);
 
return 0;
}
 
 
 
/drivers/video/drm/radeon/radeon.h
44,13 → 44,11
* - TESTING, TESTING, TESTING
*/
 
#include <types.h>
#include <list.h>
#include "types.h"
#include "pci.h"
 
#include <pci.h>
#include "errno-base.h"
 
#include <errno-base.h>
 
#include "radeon_mode.h"
#include "radeon_reg.h"
#include "r300.h"
62,6 → 60,7
extern int radeon_r4xx_atom;
 
 
 
/*
* Copy from radeon_drv.h so we don't have to include both and have conflicting
* symbols.
170,15 → 169,15
unsigned long count_timeout;
// wait_queue_head_t queue;
// rwlock_t lock;
struct list_head created;
struct list_head emited;
struct list_head signaled;
// struct list_head created;
// struct list_head emited;
// struct list_head signaled;
};
 
struct radeon_fence {
struct radeon_device *rdev;
// struct kref kref;
struct list_head list;
// struct list_head list;
/* protected by radeon_fence.lock */
uint32_t seq;
unsigned long timeout;
205,7 → 204,7
struct radeon_object;
 
struct radeon_object_list {
struct list_head list;
// struct list_head list;
struct radeon_object *robj;
uint64_t gpu_offset;
unsigned rdomain;
217,6 → 216,7
 
 
 
 
/*
* GART structures, functions & helpers
*/
255,8 → 255,8
void radeon_gart_fini(struct radeon_device *rdev);
void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
int pages);
int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
int pages, u32_t *pagelist);
//int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
// int pages, struct page **pagelist);
 
 
/*
309,7 → 309,7
* CP & ring.
*/
struct radeon_ib {
struct list_head list;
// struct list_head list;
unsigned long idx;
uint64_t gpu_addr;
struct radeon_fence *fence;
320,10 → 320,10
struct radeon_ib_pool {
// struct mutex mutex;
struct radeon_object *robj;
struct list_head scheduled_ibs;
// struct list_head scheduled_ibs;
struct radeon_ib ibs[RADEON_IB_POOL_SIZE];
bool ready;
DECLARE_BITMAP(alloc_bm, RADEON_IB_POOL_SIZE);
// DECLARE_BITMAP(alloc_bm, RADEON_IB_POOL_SIZE);
};
 
struct radeon_cp {
364,7 → 364,7
struct radeon_cs_reloc {
// struct drm_gem_object *gobj;
struct radeon_object *robj;
struct radeon_object_list lobj;
// struct radeon_object_list lobj;
uint32_t handle;
uint32_t flags;
};
388,7 → 388,7
unsigned nrelocs;
struct radeon_cs_reloc *relocs;
struct radeon_cs_reloc **relocs_ptr;
struct list_head validated;
// struct list_head validated;
/* indices of various chunks */
int chunk_ib_idx;
int chunk_relocs_idx;
/drivers/video/drm/radeon/radeon_asic.h
403,8 → 403,8
.gpu_reset = &rv515_gpu_reset,
.mc_init = &r520_mc_init,
.mc_fini = &r520_mc_fini,
.wb_init = &r100_wb_init,
.wb_fini = &r100_wb_fini,
// .wb_init = &r100_wb_init,
// .wb_fini = &r100_wb_fini,
.gart_enable = &r300_gart_enable,
.gart_disable = &rv370_pcie_gart_disable,
.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
/drivers/video/drm/radeon/radeon_device.c
46,7 → 46,7
*/
static void radeon_surface_init(struct radeon_device *rdev)
{
dbgprintf("%s\n",__FUNCTION__);
dbgprintf("%s\n\r",__FUNCTION__);
 
/* FIXME: check this out */
if (rdev->family < CHIP_R600) {
180,7 → 180,7
{
uint32_t reg;
 
dbgprintf("%s\n",__FUNCTION__);
dbgprintf("%s\n\r",__FUNCTION__);
 
/* first check CRTCs */
if (ASIC_IS_AVIVO(rdev)) {
231,7 → 231,7
void radeon_register_accessor_init(struct radeon_device *rdev)
{
 
dbgprintf("%s\n",__FUNCTION__);
dbgprintf("%s\n\r",__FUNCTION__);
 
rdev->mm_rreg = &r100_mm_rreg;
rdev->mm_wreg = &r100_mm_wreg;
288,7 → 288,7
int radeon_asic_init(struct radeon_device *rdev)
{
 
dbgprintf("%s\n",__FUNCTION__);
dbgprintf("%s\n\r",__FUNCTION__);
 
radeon_register_accessor_init(rdev);
switch (rdev->family) {
360,7 → 360,7
{
int r;
 
dbgprintf("%s\n",__FUNCTION__);
dbgprintf("%s\n\r",__FUNCTION__);
 
radeon_get_clock_info(rdev->ddev);
r = radeon_static_clocks_init(rdev->ddev);
436,7 → 436,7
 
int radeon_atombios_init(struct radeon_device *rdev)
{
dbgprintf("%s\n",__FUNCTION__);
dbgprintf("%s\n\r",__FUNCTION__);
 
atom_card_info.dev = rdev->ddev;
rdev->mode_info.atom_context = atom_parse(&atom_card_info, rdev->bios);
462,6 → 462,7
int radeon_modeset_init(struct radeon_device *rdev);
void radeon_modeset_fini(struct radeon_device *rdev);
 
void *ring_buffer;
/*
* Radeon device.
*/
472,7 → 473,7
{
int r, ret = -1;
 
dbgprintf("%s\n",__FUNCTION__);
dbgprintf("%s\n\r",__FUNCTION__);
 
DRM_INFO("radeon: Initializing kernel modesetting.\n");
rdev->shutdown = false;
491,6 → 492,7
// mutex_init(&rdev->cp.mutex);
// rwlock_init(&rdev->fence_drv.lock);
 
ring_buffer = CreateRingBuffer( 1024*1024, PG_SW );
 
if (radeon_agpmode == -1) {
rdev->flags &= ~RADEON_IS_AGP;
618,10 → 620,10
// return r;
// }
/* Memory manager */
r = radeon_object_init(rdev);
if (r) {
return r;
}
// r = radeon_object_init(rdev);
// if (r) {
// return r;
// }
/* Initialize GART (initialize after TTM so we can allocate
* memory through TTM but finalize after TTM) */
r = radeon_gart_enable(rdev);
633,14 → 635,15
if (!r) {
r = radeon_cp_init(rdev, 1024 * 1024);
}
if (!r) {
r = radeon_wb_init(rdev);
if (r) {
DRM_ERROR("radeon: failled initializing WB (%d).\n", r);
return r;
}
}
// if (!r) {
// r = radeon_wb_init(rdev);
// if (r) {
// DRM_ERROR("radeon: failled initializing WB (%d).\n", r);
// return r;
// }
// }
 
#if 0
if (!r) {
r = radeon_ib_pool_init(rdev);
if (r) {
648,8 → 651,6
return r;
}
}
#if 0
 
if (!r) {
r = radeon_ib_test(rdev);
if (r) {
693,9 → 694,9
if(action != 1)
return 0;
 
if(!dbg_open("/hd0/2/atikms.log"))
if(!dbg_open("/rd/1/drivers/atikms.log"))
{
printf("Can't open /hd0/2/atikms.log\nExit\n");
printf("Can't open /rd/1/drivers/ati2d.log\nExit\n");
return 0;
}
 
792,9 → 793,9
struct radeon_device *rdev;
int r;
 
dbgprintf("%s\n",__FUNCTION__);
dbgprintf("%s\n\r",__FUNCTION__);
 
rdev = kzalloc(sizeof(struct radeon_device), GFP_KERNEL);
rdev = malloc(sizeof(struct radeon_device));
if (rdev == NULL) {
return -ENOMEM;
};
824,7 → 825,7
struct drm_device *dev;
int ret;
 
dbgprintf("%s\n",__FUNCTION__);
dbgprintf("%s\n\r",__FUNCTION__);
 
dev = malloc(sizeof(*dev));
if (!dev)
/drivers/video/drm/radeon/radeon_ring.c
32,15 → 32,14
#include "radeon.h"
#include "atom.h"
 
extern void * ring_buffer;
 
#if 0
int radeon_debugfs_ib_init(struct radeon_device *rdev);
 
/*
* IB.
*/
 
#if 0
 
int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib)
{
struct radeon_fence *fence;
99,7 → 98,6
return r;
}
 
 
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
{
struct radeon_ib *tmp = *ib;
172,7 → 170,6
mutex_unlock(&rdev->ib_pool.mutex);
return 0;
}
#endif
 
int radeon_ib_pool_init(struct radeon_device *rdev)
{
213,9 → 210,9
bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
rdev->ib_pool.ready = true;
DRM_INFO("radeon: ib pool ready.\n");
// if (radeon_debugfs_ib_init(rdev)) {
// DRM_ERROR("Failed to register debugfs file for IB !\n");
// }
if (radeon_debugfs_ib_init(rdev)) {
DRM_ERROR("Failed to register debugfs file for IB !\n");
}
return r;
}
 
224,18 → 221,16
if (!rdev->ib_pool.ready) {
return;
}
// mutex_lock(&rdev->ib_pool.mutex);
mutex_lock(&rdev->ib_pool.mutex);
bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
if (rdev->ib_pool.robj) {
// radeon_object_kunmap(rdev->ib_pool.robj);
// radeon_object_unref(&rdev->ib_pool.robj);
radeon_object_kunmap(rdev->ib_pool.robj);
radeon_object_unref(&rdev->ib_pool.robj);
rdev->ib_pool.robj = NULL;
}
// mutex_unlock(&rdev->ib_pool.mutex);
mutex_unlock(&rdev->ib_pool.mutex);
}
 
#if 0
 
int radeon_ib_test(struct radeon_device *rdev)
{
struct radeon_ib *ib;
407,6 → 402,7
int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
int pages, u32_t *pagelist);
 
#define page_tabs 0xFDC00000
 
 
int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
417,6 → 413,7
 
rdev->cp.ring_size = ring_size;
 
#if 0
/* Allocate ring buffer */
if (rdev->cp.ring_obj == NULL) {
r = radeon_object_create(rdev, NULL, rdev->cp.ring_size,
445,19 → 442,23
return r;
}
}
#endif
 
dbgprintf("ring size %x\n", ring_size);
 
// rdev->cp.ring = CreateRingBuffer( ring_size, PG_SW );
dbgprintf("ring buffer %x\n", rdev->cp.ring );
 
rdev->cp.ring = ring_buffer; //CreateRingBuffer( ring_size, PG_SW );
 
dbgprintf("ring buffer %x\n", rdev->cp.ring );
 
// rdev->cp.gpu_addr = rdev->mc.gtt_location;
rdev->cp.gpu_addr = rdev->mc.gtt_location;
 
// u32_t *pagelist = &((u32_t*)page_tabs)[(u32_t)rdev->cp.ring >> 12];
u32_t *pagelist = &((u32_t*)page_tabs)[(u32_t)rdev->cp.ring >> 12];
 
// dbgprintf("pagelist %x\n", pagelist);
dbgprintf("pagelist %x\n", pagelist);
 
// radeon_gart_bind(rdev, 0, ring_size / 4096, pagelist);
radeon_gart_bind(rdev, 0, ring_size / 4096, pagelist);
 
rdev->cp.ptr_mask = (rdev->cp.ring_size / 4) - 1;
rdev->cp.ring_free_dw = rdev->cp.ring_size / 4;
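The pagelist construction above relies on page_tabs (0xFDC00000) being a window onto the kernel's own page tables, so the PTE for any mapped virtual address can be read directly; the self-mapping itself is assumed here, not shown in this diff. A minimal sketch:

/*
 *   u32_t pte  = ((u32_t*)page_tabs)[(u32_t)va >> 12];   one PTE per 4 KiB page
 *   u32_t phys = pte & ~4095;                            frame address; low bits are flags
 *
 * radeon_gart_bind() applies the same & ~4095 mask to each pagelist entry
 * before programming it into the GART.
 */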
/drivers/video/drm/radeon/rv515.c
140,7 → 140,7
unsigned gb_tile_config;
int r;
 
dbgprintf("%s\n",__FUNCTION__);
dbgprintf("%s\n\r",__FUNCTION__);
/* Sub pixel 1/12 so we can have 4K rendering according to doc */
gb_tile_config = R300_ENABLE_TILING | R300_TILE_SIZE_16;
switch (rdev->num_gb_pipes) {
231,7 → 231,7
radeon_ring_write(rdev, 0);
radeon_ring_unlock_commit(rdev);
 
dbgprintf("done %s\n",__FUNCTION__);
dbgprintf("done %s\n\r",__FUNCTION__);
 
}
 
296,7 → 296,7
bool reinit_cp;
int i;
 
dbgprintf("%s\n",__FUNCTION__);
dbgprintf("%s\n\r",__FUNCTION__);
 
reinit_cp = rdev->cp.ready;
rdev->cp.ready = false;
350,7 → 350,7
{
uint32_t status;
 
dbgprintf("%s\n",__FUNCTION__);
dbgprintf("%s\n\r",__FUNCTION__);
 
/* reset order likely matter */
status = RREG32(RADEON_RBBM_STATUS);
569,7 → 569,7
 
int rv515_init(struct radeon_device *rdev)
{
dbgprintf("%s\n",__FUNCTION__);
dbgprintf("%s\n\r",__FUNCTION__);
 
rdev->config.r300.reg_safe_bm = r500_reg_safe_bm;
rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r500_reg_safe_bm);
/drivers/video/drm/radeon/radeon_atombios.c
944,7 → 944,7
struct radeon_device *rdev = dev->dev_private;
uint32_t bios_2_scratch, bios_6_scratch;
 
dbgprintf("%s\n",__FUNCTION__);
dbgprintf("%s\n\r",__FUNCTION__);
 
if (rdev->family >= CHIP_R600) {
bios_2_scratch = RREG32(R600_BIOS_0_SCRATCH);
/drivers/video/drm/radeon/radeon_bios.c
39,7 → 39,7
size_t size;
 
rdev->bios = NULL;
bios = (uint8_t*)pci_map_rom(rdev->pdev, &size);
bios = pci_map_rom(rdev->pdev, &size);
if (!bios) {
return false;
}