67,7 → 67,7 |
} |
|
r = radeon_bo_create(rdev, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, |
0, NULL, &vram_obj); |
0, NULL, NULL, &vram_obj); |
if (r) { |
DRM_ERROR("Failed to create VRAM object\n"); |
goto out_cleanup; |
87,7 → 87,8 |
struct radeon_fence *fence = NULL; |
|
r = radeon_bo_create(rdev, size, PAGE_SIZE, true, |
RADEON_GEM_DOMAIN_GTT, 0, NULL, gtt_obj + i); |
RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL, |
gtt_obj + i); |
if (r) { |
DRM_ERROR("Failed to create GTT object %d\n", i); |
goto out_lclean; |
116,11 → 117,16 |
radeon_bo_kunmap(gtt_obj[i]); |
|
if (ring == R600_RING_TYPE_DMA_INDEX) |
r = radeon_copy_dma(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence); |
fence = radeon_copy_dma(rdev, gtt_addr, vram_addr, |
size / RADEON_GPU_PAGE_SIZE, |
NULL); |
else |
r = radeon_copy_blit(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence); |
if (r) { |
fence = radeon_copy_blit(rdev, gtt_addr, vram_addr, |
size / RADEON_GPU_PAGE_SIZE, |
NULL); |
if (IS_ERR(fence)) { |
DRM_ERROR("Failed GTT->VRAM copy %d\n", i); |
r = PTR_ERR(fence); |
goto out_lclean_unpin; |
} |
|
162,11 → 168,16 |
radeon_bo_kunmap(vram_obj); |
|
if (ring == R600_RING_TYPE_DMA_INDEX) |
r = radeon_copy_dma(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence); |
fence = radeon_copy_dma(rdev, vram_addr, gtt_addr, |
size / RADEON_GPU_PAGE_SIZE, |
NULL); |
else |
r = radeon_copy_blit(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence); |
if (r) { |
fence = radeon_copy_blit(rdev, vram_addr, gtt_addr, |
size / RADEON_GPU_PAGE_SIZE, |
NULL); |
if (IS_ERR(fence)) { |
DRM_ERROR("Failed VRAM->GTT copy %d\n", i); |
r = PTR_ERR(fence); |
goto out_lclean_unpin; |
} |
|
222,7 → 233,7 |
radeon_bo_unreserve(gtt_obj[i]); |
		radeon_bo_unref(&gtt_obj[i]); |
} |
if (fence) |
if (fence && !IS_ERR(fence)) |
radeon_fence_unref(&fence); |
break; |
} |