Subversion Repositories Kolibri OS

Compare Revisions

Regard whitespace Rev 3763 → Rev 3764

/drivers/video/drm/radeon/radeon_ttm.c
74,6 → 74,8
struct drm_global_reference *global_ref;
int r;
 
ENTER();
 
rdev->mman.mem_global_referenced = false;
global_ref = &rdev->mman.mem_global_ref;
global_ref->global_type = DRM_GLOBAL_TTM_MEM;
102,10 → 104,14
}
 
rdev->mman.mem_global_referenced = true;
 
LEAVE();
 
return 0;
}
 
 
 
static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
return 0;
116,6 → 122,8
{
struct radeon_device *rdev;
 
ENTER();
 
rdev = radeon_get_rdev(bdev);
 
switch (type) {
159,11 → 167,170
DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
return -EINVAL;
}
 
LEAVE();
 
return 0;
}
 
/* TTM evict_flags hook: choose where a buffer should be migrated when it
 * must be evicted from its current placement.
 *
 * Non-radeon BOs get a plain cacheable system-memory placement. Radeon BOs
 * in VRAM are pushed to GTT while the GFX ring is up, otherwise to CPU
 * (system) memory; everything else goes to CPU memory. */
static void radeon_evict_flags(struct ttm_buffer_object *bo,
			       struct ttm_placement *placement)
{
	/* static: the placement array must outlive this call, since TTM keeps
	 * the pointer in *placement after we return. */
	static u32 sys_placement = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
	struct radeon_bo *rbo;

	if (!radeon_ttm_bo_is_radeon_bo(bo)) {
		placement->fpfn = 0;
		placement->lpfn = 0;
		placement->placement = &sys_placement;
		placement->busy_placement = &sys_placement;
		placement->num_placement = 1;
		placement->num_busy_placement = 1;
		return;
	}

	rbo = container_of(bo, struct radeon_bo, tbo);
	if (bo->mem.mem_type == TTM_PL_VRAM) {
		/* With no GFX ring we cannot blit VRAM->GTT, so evict to CPU. */
		if (rbo->rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready == false)
			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
		else
			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
	} else {
		/* TTM_PL_TT and any other type both fall back to CPU memory. */
		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
	}

	*placement = rbo->placement;
}
 
/* TTM verify_access hook: no per-file access checks are enforced in this
 * port — every caller is granted access (always returns 0 / success). */
static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
return 0;
}
 
/* Perform a "null" move: adopt @new_mem as the BO's current placement
 * without any data copy, transferring ownership of new_mem->mm_node to
 * the BO. The BO must not already own an allocator node. */
static void radeon_move_null(struct ttm_buffer_object *bo,
			     struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg *cur_mem = &bo->mem;

	BUG_ON(cur_mem->mm_node != NULL);
	/* Copy the whole placement, then steal the node from new_mem so it
	 * is not freed twice. */
	*cur_mem = *new_mem;
	new_mem->mm_node = NULL;
}
 
/* TTM io_mem_free hook: intentionally a no-op — nothing is reserved for
 * I/O memory regions in this port, so there is nothing to release. */
static void radeon_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}
 
/* TTM sync-object wait hook: block until the radeon fence signals.
 * The @lazy hint is not used by the radeon fence wait path. */
static int radeon_sync_obj_wait(void *sync_obj, bool lazy, bool interruptible)
{
	struct radeon_fence *fence = sync_obj;

	return radeon_fence_wait(fence, interruptible);
}
 
/* TTM sync-object flush hook: nothing to flush here — reports success
 * unconditionally. */
static int radeon_sync_obj_flush(void *sync_obj)
{
return 0;
}
 
/* TTM sync-object unref hook: drop one reference on the fence and clear
 * the caller's pointer (radeon_fence_unref takes a pointer-to-pointer). */
static void radeon_sync_obj_unref(void **sync_obj)
{
	struct radeon_fence **fence = (struct radeon_fence **)sync_obj;

	radeon_fence_unref(fence);
}
 
/* TTM sync-object ref hook: take an extra reference on the fence and
 * return it as an opaque pointer. */
static void *radeon_sync_obj_ref(void *sync_obj)
{
	struct radeon_fence *fence = sync_obj;

	return radeon_fence_ref(fence);
}
 
/* TTM sync-object poll hook: true when the radeon fence has signaled. */
static bool radeon_sync_obj_signaled(void *sync_obj)
{
	struct radeon_fence *fence = sync_obj;

	return radeon_fence_signaled(fence);
}
 
/*
 * TTM backend functions.
 */
/* Per-BO GTT binding state. The backend callbacks below receive a
 * struct ttm_tt * and cast it back to this type, so @ttm must remain
 * the first member. */
struct radeon_ttm_tt {
struct ttm_dma_tt ttm; /* must stay first: (void *)ttm cast relies on it */
struct radeon_device *rdev; /* owning device, used for GART bind/unbind */
u64 offset; /* GART offset the pages were bound at (set in bind) */
};
 
/* TTM backend bind hook: map the ttm_tt's pages into the GART at the
 * offset described by @bo_mem.
 *
 * Returns 0 on success or the error from radeon_gart_bind(). Warns (but
 * still attempts the bind) if the ttm_tt has no pages. */
static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
				   struct ttm_mem_reg *bo_mem)
{
	struct radeon_ttm_tt *gtt = (struct radeon_ttm_tt *)ttm;
	int ret;

	/* bo_mem->start is in pages; remember the byte offset for unbind. */
	gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
	if (ttm->num_pages == 0) {
		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
		     ttm->num_pages, bo_mem, ttm);
	}
	ret = radeon_gart_bind(gtt->rdev, gtt->offset, ttm->num_pages,
			       ttm->pages, gtt->ttm.dma_address);
	if (ret != 0) {
		DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
			  ttm->num_pages, (unsigned)gtt->offset);
		return ret;
	}
	return 0;
}
 
/* TTM backend unbind hook: remove the pages previously bound at
 * gtt->offset from the GART. Always reports success. */
static int radeon_ttm_backend_unbind(struct ttm_tt *ttm)
{
	struct radeon_ttm_tt *gtt = (struct radeon_ttm_tt *)ttm;

	radeon_gart_unbind(gtt->rdev, gtt->offset, ttm->num_pages);
	return 0;
}
 
/* TTM backend destroy hook: tear down the DMA ttm_tt state and free the
 * containing radeon_ttm_tt allocated in radeon_ttm_tt_create(). */
static void radeon_ttm_backend_destroy(struct ttm_tt *ttm)
{
	struct radeon_ttm_tt *gtt = (struct radeon_ttm_tt *)ttm;

	ttm_dma_tt_fini(&gtt->ttm);
	kfree(gtt);
}
 
/* Backend vtable handed to TTM via gtt->ttm.ttm.func in
 * radeon_ttm_tt_create(): GART bind/unbind plus teardown. */
static struct ttm_backend_func radeon_backend_func = {
	.bind	 = radeon_ttm_backend_bind,
	.unbind	 = radeon_ttm_backend_unbind,
	.destroy = radeon_ttm_backend_destroy,
};
 
/* TTM ttm_tt_create hook: allocate the per-BO page-table object.
 *
 * On AGP-flagged devices (when AGP support is compiled in) the generic
 * AGP ttm_tt is used instead. Otherwise a radeon_ttm_tt is allocated,
 * wired to radeon_backend_func, and its DMA state initialized.
 *
 * Returns the embedded struct ttm_tt *, or NULL on allocation/init
 * failure (the partially-built object is freed on the init path). */
static struct ttm_tt *radeon_ttm_tt_create(struct ttm_bo_device *bdev,
					   unsigned long size, uint32_t page_flags,
					   struct page *dummy_read_page)
{
	struct radeon_device *rdev = radeon_get_rdev(bdev);
	struct radeon_ttm_tt *gtt;

#if __OS_HAS_AGP
	if (rdev->flags & RADEON_IS_AGP)
		return ttm_agp_tt_create(bdev, rdev->ddev->agp->bridge,
					 size, page_flags, dummy_read_page);
#endif

	gtt = kzalloc(sizeof(*gtt), GFP_KERNEL);
	if (!gtt)
		return NULL;

	gtt->ttm.ttm.func = &radeon_backend_func;
	gtt->rdev = rdev;
	if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags, dummy_read_page)) {
		kfree(gtt);
		return NULL;
	}
	return &gtt->ttm.ttm;
}
 
static struct ttm_bo_driver radeon_bo_driver = {
// .create_ttm_backend_entry = &radeon_create_ttm_backend_entry,
.ttm_tt_create = &radeon_ttm_tt_create,
// .ttm_tt_populate = &radeon_ttm_tt_populate,
// .ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate,
// .invalidate_caches = &radeon_invalidate_caches,
.init_mem_type = &radeon_init_mem_type,
// .evict_flags = &radeon_evict_flags,
176,6 → 343,8
// .sync_obj_ref = &radeon_sync_obj_ref,
// .move_notify = &radeon_bo_move_notify,
// .fault_reserve_notify = &radeon_bo_fault_reserve_notify,
// .io_mem_reserve = &radeon_ttm_io_mem_reserve,
// .io_mem_free = &radeon_ttm_io_mem_free,
};
 
int radeon_ttm_init(struct radeon_device *rdev)
182,6 → 351,8
{
int r;
 
ENTER();
 
r = radeon_ttm_global_init(rdev);
if (r) {
return r;
202,21 → 373,23
DRM_ERROR("Failed initializing VRAM heap.\n");
return r;
}
r = radeon_bo_create(rdev, NULL, 256 * 1024, true,
RADEON_GEM_DOMAIN_VRAM,
&rdev->stollen_vga_memory);
if (r) {
return r;
}
r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
if (r)
return r;
r = radeon_bo_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
radeon_bo_unreserve(rdev->stollen_vga_memory);
if (r) {
radeon_bo_unref(&rdev->stollen_vga_memory);
return r;
}
 
// r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true,
// RADEON_GEM_DOMAIN_VRAM,
// NULL, &rdev->stollen_vga_memory);
// if (r) {
// return r;
// }
// r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
// if (r)
// return r;
// r = radeon_bo_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
// radeon_bo_unreserve(rdev->stollen_vga_memory);
// if (r) {
// radeon_bo_unref(&rdev->stollen_vga_memory);
// return r;
// }
 
DRM_INFO("radeon: %uM of VRAM memory ready\n",
(unsigned)rdev->mc.real_vram_size / (1024 * 1024));
r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT,
227,36 → 400,76
}
DRM_INFO("radeon: %uM of GTT memory ready.\n",
(unsigned)(rdev->mc.gtt_size / (1024 * 1024)));
if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
}
 
r = radeon_ttm_debugfs_init(rdev);
if (r) {
DRM_ERROR("Failed to init debugfs\n");
return r;
}
LEAVE();
 
return 0;
}
 
/* VM fault-handling ops for TTM mmap. NOTE(review): neither symbol is
 * referenced in this chunk — presumably wired up by an mmap/fault path
 * defined elsewhere (or unused in this port); confirm before removing. */
static struct vm_operations_struct radeon_ttm_vm_ops;
static const struct vm_operations_struct *ttm_vm_ops = NULL;
 
/* this should only be called at bootup or when userspace
 * isn't running */
/* Shrink or grow TTM's notion of usable VRAM to @size bytes.
 * No-op until the memory manager has been initialized. */
void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
{
	if (!rdev->mman.initialized)
		return;

	/* Only TTM's size idea is adjusted; it derives lpfn from this. */
	rdev->mman.bdev.man[TTM_PL_VRAM].size = size >> PAGE_SHIFT;
}
 
/* Removed duplicate definitions of radeon_ttm_vm_ops and ttm_vm_ops:
 * both statics are already defined earlier in this file, and a second
 * initialized definition of ttm_vm_ops is a redefinition error in C. */
 
#if 0
 
radeon_bo_init
{
<6>[drm] Detected VRAM RAM=1024M, BAR=256M
<6>[drm] RAM width 128bits DDR
 
radeon_ttm_init
{
radeon_ttm_global_init
{
radeon_ttm_mem_global_init
 
ttm_bo_global_init
}
 
ttm_bo_device_init
{
ttm_bo_init_mm
{
radeon_init_mem_type
};
}
 
ttm_bo_init_mm
{
radeon_init_mem_type
 
ttm_bo_man_init
}
 
<6>[drm] radeon: 1024M of VRAM memory ready
 
ttm_bo_init_mm
{
radeon_init_mem_type
 
ttm_bo_man_init
}
 
<6>[drm] radeon: 512M of GTT memory ready.
}
};
 
#endif