Subversion Repositories: Kolibri OS

Compare Revisions

Rev 5270 → Rev 5271

/drivers/video/drm/radeon/cik_sdma.c
134,7 → 134,7
                  struct radeon_ib *ib)
{
        struct radeon_ring *ring = &rdev->ring[ib->ring];
-       u32 extra_bits = (ib->vm ? ib->vm->id : 0) & 0xf;
+       u32 extra_bits = (ib->vm ? ib->vm->ids[ib->ring].id : 0) & 0xf;

        if (rdev->wb.enabled) {
                u32 next_rptr = ring->wptr + 5;
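
Note: the removed expression read a single VM-wide id; the new one indexes a
per-ring slot, so each ring can carry its own VMID. A minimal sketch of the
layout the new expression implies (everything beyond the id field is an
assumption, not the exact kernel struct):

        struct radeon_vm_id {
                unsigned id;    /* hardware VMID assigned on this ring */
        };

        struct radeon_vm {
                /* one slot per ring, indexed by the ring number */
                struct radeon_vm_id ids[RADEON_NUM_RINGS];
        };
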
530,18 → 530,19
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @num_gpu_pages: number of GPU pages to xfer
- * @fence: radeon fence object
+ * @resv: reservation object to sync to
 *
 * Copy GPU paging using the DMA engine (CIK).
 * Used by the radeon ttm implementation to move pages if
 * registered as the asic copy callback.
 */
-int cik_copy_dma(struct radeon_device *rdev,
+struct radeon_fence *cik_copy_dma(struct radeon_device *rdev,
                 uint64_t src_offset, uint64_t dst_offset,
                 unsigned num_gpu_pages,
-                struct radeon_fence **fence)
+                struct reservation_object *resv)
{
-       struct radeon_semaphore *sem = NULL;
+       struct radeon_fence *fence;
+       struct radeon_sync sync;
        int ring_index = rdev->asic->copy.dma_ring_index;
        struct radeon_ring *ring = &rdev->ring[ring_index];
        u32 size_in_bytes, cur_size_in_bytes;
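
Note: the copy callback now takes its synchronization input as a reservation
object and reports errors through the returned pointer instead of an int. A
hedged caller sketch, not taken from this revision (the wait/unref calls are
illustrative):

        struct radeon_fence *fence;
        int r;

        fence = cik_copy_dma(rdev, src_offset, dst_offset,
                             num_gpu_pages, resv);
        if (IS_ERR(fence))
                return PTR_ERR(fence);          /* errno rides in ERR_PTR */
        r = radeon_fence_wait(fence, false);    /* wait, then drop the ref */
        radeon_fence_unref(&fence);
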
548,11 → 549,7
        int i, num_loops;
        int r = 0;

-       r = radeon_semaphore_create(rdev, &sem);
-       if (r) {
-               DRM_ERROR("radeon: moving bo (%d).\n", r);
-               return r;
-       }
+       radeon_sync_create(&sync);

        size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
        num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
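
Note: radeon_sync_create() only initializes the caller-owned sync object,
which is why the allocation-failure path of radeon_semaphore_create() is
gone. The divisor 0x1fffff is the per-packet byte limit of the SDMA copy
packet, so num_loops is the number of copy packets to emit. Illustrative
arithmetic:

        /* Copying 8 MiB: 8388608 / 2097151 = 4.000002..., so    */
        /* DIV_ROUND_UP yields 5 packets, the last one shorter.  */
        num_loops = DIV_ROUND_UP(8 * 1024 * 1024, 0x1fffff);    /* = 5 */
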
559,12 → 556,12
        r = radeon_ring_lock(rdev, ring, num_loops * 7 + 14);
        if (r) {
                DRM_ERROR("radeon: moving bo (%d).\n", r);
-               radeon_semaphore_free(rdev, &sem, NULL);
-               return r;
+               radeon_sync_free(rdev, &sync, NULL);
+               return ERR_PTR(r);
        }

-       radeon_semaphore_sync_to(sem, *fence);
-       radeon_semaphore_sync_rings(rdev, sem, ring->idx);
+       radeon_sync_resv(rdev, &sync, resv, false);
+       radeon_sync_rings(rdev, &sync, ring->idx);

        for (i = 0; i < num_loops; i++) {
                cur_size_in_bytes = size_in_bytes;
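
Note: the semaphore pair is replaced by the radeon_sync helpers:
radeon_sync_resv() gathers the fences attached to the reservation object, and
radeon_sync_rings() emits the corresponding waits on this ring. The pattern
used across this function, condensed from the hunks above and below (error
handling elided):

        radeon_sync_create(&sync);                  /* init, cannot fail   */
        radeon_sync_resv(rdev, &sync, resv, false); /* collect resv fences */
        radeon_sync_rings(rdev, &sync, ring->idx);  /* emit waits on ring  */
        /* ... emit the copy packets and the fence ... */
        radeon_sync_free(rdev, &sync, fence);       /* release after use   */
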
582,17 → 579,17
                dst_offset += cur_size_in_bytes;
        }

-       r = radeon_fence_emit(rdev, fence, ring->idx);
+       r = radeon_fence_emit(rdev, &fence, ring->idx);
        if (r) {
                radeon_ring_unlock_undo(rdev, ring);
-               radeon_semaphore_free(rdev, &sem, NULL);
-               return r;
+               radeon_sync_free(rdev, &sync, NULL);
+               return ERR_PTR(r);
        }

        radeon_ring_unlock_commit(rdev, ring, false);
-       radeon_semaphore_free(rdev, &sem, *fence);
+       radeon_sync_free(rdev, &sync, fence);

-       return r;
+       return fence;
}

/**
666,17 → 663,20
{
        struct radeon_ib ib;
        unsigned i;
+       unsigned index;
        int r;
-       void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
        u32 tmp = 0;
+       u64 gpu_addr;

-       if (!ptr) {
-               DRM_ERROR("invalid vram scratch pointer\n");
-               return -EINVAL;
-       }
+       if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+               index = R600_WB_DMA_RING_TEST_OFFSET;
+       else
+               index = CAYMAN_WB_DMA1_RING_TEST_OFFSET;
+
+       gpu_addr = rdev->wb.gpu_addr + index;

        tmp = 0xCAFEDEAD;
-       writel(tmp, ptr);
+       rdev->wb.wb[index/4] = cpu_to_le32(tmp);

        r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
        if (r) {
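
Note: the test no longer pokes a VRAM scratch location through an uncached
readl/writel mapping; it seeds a slot in the GPU writeback page in system
memory, so polling does not go through the PCIe BAR. The handshake, condensed
from the code above:

        rdev->wb.wb[index/4] = cpu_to_le32(0xCAFEDEAD); /* CPU seeds slot */
        /* ... schedule the IB that writes 0xDEADBEEF to gpu_addr ... */
        tmp = le32_to_cpu(rdev->wb.wb[index/4]);        /* CPU polls slot */
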
685,8 → 685,8
        }

        ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
-       ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
-       ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr);
+       ib.ptr[1] = lower_32_bits(gpu_addr);
+       ib.ptr[2] = upper_32_bits(gpu_addr);
        ib.ptr[3] = 1;
        ib.ptr[4] = 0xDEADBEEF;
        ib.length_dw = 5;
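
Note: the five dwords form a single SDMA linear-write packet. Annotated, with
the layout as used here:

        ib.ptr[0] = SDMA_PACKET(...);        /* packet header              */
        ib.ptr[1] = lower_32_bits(gpu_addr); /* dst address, low 32 bits   */
        ib.ptr[2] = upper_32_bits(gpu_addr); /* dst address, high 32 bits  */
        ib.ptr[3] = 1;                       /* count of data dwords       */
        ib.ptr[4] = 0xDEADBEEF;              /* the single payload dword   */
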
703,7 → 703,7
                return r;
        }
        for (i = 0; i < rdev->usec_timeout; i++) {
-               tmp = readl(ptr);
+               tmp = le32_to_cpu(rdev->wb.wb[index/4]);
                if (tmp == 0xDEADBEEF)
                        break;
                DRM_UDELAY(1);
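
Note: the loop busy-waits up to rdev->usec_timeout microseconds for the magic
value to appear. A hedged sketch of the evaluation that typically follows
such a loop (not part of this hunk; the error code is an assumption):

        if (i < rdev->usec_timeout) {
                DRM_INFO("ib test succeeded in %u usecs\n", i);
        } else {
                DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp);
                r = -EINVAL;
        }
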
900,25 → 900,21
 * Update the page table base and flush the VM TLB
 * using sDMA (CIK).
 */
-void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
+void cik_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
+                     unsigned vm_id, uint64_t pd_addr)
{
-       struct radeon_ring *ring = &rdev->ring[ridx];
-
-       if (vm == NULL)
-               return;
-
        radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
-       if (vm->id < 8) {
-               radeon_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2);
+       if (vm_id < 8) {
+               radeon_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2);
        } else {
-               radeon_ring_write(ring, (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2);
+               radeon_ring_write(ring, (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm_id - 8) << 2)) >> 2);
        }
-       radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
+       radeon_ring_write(ring, pd_addr >> 12);

        /* update SH_MEM_* regs */
        radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
        radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
-       radeon_ring_write(ring, VMID(vm->id));
+       radeon_ring_write(ring, VMID(vm_id));

        radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
        radeon_ring_write(ring, SH_MEM_BASES >> 2);
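
Note: the page-table base registers for VMIDs 0-7 and 8-15 live in two
separate register banks, which is what the branch above selects between. A
hypothetical helper capturing the same selection (not present in the source):

        static u32 cik_vm_pt_base_reg(unsigned vm_id)
        {
                return (vm_id < 8)
                        ? (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2))
                        : (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm_id - 8) << 2));
        }
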
941,11 → 937,11
        radeon_ring_write(ring, VMID(0));

        /* flush HDP */
-       cik_sdma_hdp_flush_ring_emit(rdev, ridx);
+       cik_sdma_hdp_flush_ring_emit(rdev, ring->idx);

        /* flush TLB */
        radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
        radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
-       radeon_ring_write(ring, 1 << vm->id);
+       radeon_ring_write(ring, 1 << vm_id);
}
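
Note: VM_INVALIDATE_REQUEST takes a bitmask with one bit per VMID, so
flushing a single address space sets exactly one bit. Illustrative:

        /* vm_id == 5 invalidates only VMID 5: 1 << 5 == 0x20 */
        radeon_ring_write(ring, 1 << 5);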