29,13 → 29,6 |
#include <drm/radeon_drm.h> |
#include "radeon.h" |
|
/* GEM per-object init hook.
 *
 * Radeon sets up its buffer objects through its own creation path
 * (see radeon_gem_object_create() below), so this generic GEM init
 * callback is never expected to run; BUG() makes any stray call fatal
 * so the broken path is caught immediately.
 *
 * Returns 0, though the return is unreachable after BUG().
 */
int radeon_gem_object_init(struct drm_gem_object *obj)
{
	BUG();	/* must never be reached -- objects come from radeon_gem_object_create() */

	return 0;
}
|
void radeon_gem_object_free(struct drm_gem_object *gobj) |
{ |
struct radeon_bo *robj = gem_to_radeon_bo(gobj); |
45,9 → 38,9 |
} |
} |
|
int radeon_gem_object_create(struct radeon_device *rdev, int size, |
int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size, |
int alignment, int initial_domain, |
bool discardable, bool kernel, |
u32 flags, bool kernel, |
struct drm_gem_object **obj) |
{ |
struct radeon_bo *robj; |
60,16 → 53,19 |
alignment = PAGE_SIZE; |
} |
|
	/* maximum bo size is the minimum between visible vram and gtt size */
max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size); |
/* Maximum bo size is the unpinned gtt size since we use the gtt to |
* handle vram to system pool migrations. |
*/ |
max_size = rdev->mc.gtt_size - rdev->gart_pin_size; |
if (size > max_size) { |
printk(KERN_WARNING "%s:%d alloc size %dMb bigger than %ldMb limit\n", |
__func__, __LINE__, size >> 20, max_size >> 20); |
DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n", |
size >> 20, max_size >> 20); |
return -ENOMEM; |
} |
|
retry: |
r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, NULL, &robj); |
r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, |
flags, NULL, &robj); |
if (r) { |
if (r != -ERESTARTSYS) { |
if (initial_domain == RADEON_GEM_DOMAIN_VRAM) { |
76,7 → 72,7 |
initial_domain |= RADEON_GEM_DOMAIN_GTT; |
goto retry; |
} |
DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n", |
DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n", |
size, initial_domain, alignment, r); |
} |
return r; |
90,33 → 86,7 |
return 0; |
} |
|
int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain, |
uint64_t *gpu_addr) |
{ |
struct radeon_bo *robj = gem_to_radeon_bo(obj); |
int r; |
|
r = radeon_bo_reserve(robj, false); |
if (unlikely(r != 0)) |
return r; |
r = radeon_bo_pin(robj, pin_domain, gpu_addr); |
radeon_bo_unreserve(robj); |
return r; |
} |
|
void radeon_gem_object_unpin(struct drm_gem_object *obj) |
{ |
struct radeon_bo *robj = gem_to_radeon_bo(obj); |
int r; |
|
r = radeon_bo_reserve(robj, false); |
if (likely(r == 0)) { |
radeon_bo_unpin(robj); |
radeon_bo_unreserve(robj); |
} |
} |
|
int radeon_gem_set_domain(struct drm_gem_object *gobj, |
static int radeon_gem_set_domain(struct drm_gem_object *gobj, |
uint32_t rdomain, uint32_t wdomain) |
{ |
struct radeon_bo *robj; |
167,18 → 137,15 |
struct radeon_device *rdev = dev->dev_private; |
struct drm_radeon_gem_info *args = data; |
struct ttm_mem_type_manager *man; |
unsigned i; |
|
man = &rdev->mman.bdev.man[TTM_PL_VRAM]; |
|
args->vram_size = rdev->mc.real_vram_size; |
args->vram_visible = (u64)man->size << PAGE_SHIFT; |
if (rdev->stollen_vga_memory) |
args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory); |
args->vram_visible -= radeon_fbdev_total_size(rdev); |
args->gart_size = rdev->mc.gtt_size - 4096 - RADEON_IB_POOL_SIZE*64*1024; |
for(i = 0; i < RADEON_NUM_RINGS; ++i) |
args->gart_size -= rdev->ring[i].ring_size; |
args->vram_visible -= rdev->vram_pin_size; |
args->gart_size = rdev->mc.gtt_size; |
args->gart_size -= rdev->gart_pin_size; |
|
return 0; |
} |
|
211,7 → 178,7 |
/* create a gem object to contain this object in */ |
args->size = roundup(args->size, PAGE_SIZE); |
r = radeon_gem_object_create(rdev, args->size, args->alignment, |
args->initial_domain, false, |
args->initial_domain, args->flags, |
false, &gobj); |
if (r) { |
up_read(&rdev->exclusive_lock); |
303,18 → 270,7 |
} |
robj = gem_to_radeon_bo(gobj); |
r = radeon_bo_wait(robj, &cur_placement, true); |
switch (cur_placement) { |
case TTM_PL_VRAM: |
args->domain = RADEON_GEM_DOMAIN_VRAM; |
break; |
case TTM_PL_TT: |
args->domain = RADEON_GEM_DOMAIN_GTT; |
break; |
case TTM_PL_SYSTEM: |
args->domain = RADEON_GEM_DOMAIN_CPU; |
default: |
break; |
} |
args->domain = radeon_mem_type_to_domain(cur_placement); |
drm_gem_object_unreference_unlocked(gobj); |
r = radeon_gem_handle_lockup(rdev, r); |
return r; |
328,6 → 284,7 |
struct drm_gem_object *gobj; |
struct radeon_bo *robj; |
int r; |
uint32_t cur_placement = 0; |
|
gobj = drm_gem_object_lookup(dev, filp, args->handle); |
if (gobj == NULL) { |
334,10 → 291,11 |
return -ENOENT; |
} |
robj = gem_to_radeon_bo(gobj); |
r = radeon_bo_wait(robj, NULL, false); |
/* callback hw specific functions if any */ |
if (rdev->asic->ioctl_wait_idle) |
robj->rdev->asic->ioctl_wait_idle(rdev, robj); |
r = radeon_bo_wait(robj, &cur_placement, false); |
/* Flush HDP cache via MMIO if necessary */ |
if (rdev->asic->mmio_hdp_flush && |
radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM) |
robj->rdev->asic->mmio_hdp_flush(rdev); |
drm_gem_object_unreference_unlocked(gobj); |
r = radeon_gem_handle_lockup(rdev, r); |
return r; |