 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @num_gpu_pages: number of GPU pages to xfer
 * @resv: reservation object to sync to
 *
 * Copy GPU paging using the DMA engine (r7xx).
 * Used by the radeon ttm implementation to move pages if
 * registered as the asic copy callback.
 * Returns the fence for the copy, or an ERR_PTR on failure.
 */
int rv770_copy_dma(struct radeon_device *rdev, |
struct radeon_fence *rv770_copy_dma(struct radeon_device *rdev, |
uint64_t src_offset, uint64_t dst_offset, |
unsigned num_gpu_pages, |
struct radeon_fence **fence) |
struct reservation_object *resv) |
{ |
struct radeon_semaphore *sem = NULL; |
struct radeon_fence *fence; |
struct radeon_sync sync; |
int ring_index = rdev->asic->copy.dma_ring_index; |
struct radeon_ring *ring = &rdev->ring[ring_index]; |
u32 size_in_dw, cur_size_in_dw; |
51,11 → 52,7 |
int i, num_loops; |
int r = 0; |
|
r = radeon_semaphore_create(rdev, &sem); |
if (r) { |
DRM_ERROR("radeon: moving bo (%d).\n", r); |
return r; |
} |
radeon_sync_create(&sync); |
|
size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4; |
num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFF); |
62,12 → 59,12 |
r = radeon_ring_lock(rdev, ring, num_loops * 5 + 8); |
if (r) { |
DRM_ERROR("radeon: moving bo (%d).\n", r); |
radeon_semaphore_free(rdev, &sem, NULL); |
return r; |
radeon_sync_free(rdev, &sync, NULL); |
return ERR_PTR(r); |
} |
|
radeon_semaphore_sync_to(sem, *fence); |
radeon_semaphore_sync_rings(rdev, sem, ring->idx); |
radeon_sync_resv(rdev, &sync, resv, false); |
radeon_sync_rings(rdev, &sync, ring->idx); |
|
for (i = 0; i < num_loops; i++) { |
cur_size_in_dw = size_in_dw; |
83,15 → 80,15 |
dst_offset += cur_size_in_dw * 4; |
} |
|
r = radeon_fence_emit(rdev, fence, ring->idx); |
r = radeon_fence_emit(rdev, &fence, ring->idx); |
if (r) { |
radeon_ring_unlock_undo(rdev, ring); |
radeon_semaphore_free(rdev, &sem, NULL); |
return r; |
radeon_sync_free(rdev, &sync, NULL); |
return ERR_PTR(r); |
} |
|
radeon_ring_unlock_commit(rdev, ring, false); |
radeon_semaphore_free(rdev, &sem, *fence); |
radeon_sync_free(rdev, &sync, fence); |
|
return r; |
return fence; |
} |