@@ -62,7 +62,9 @@
{ |
/* Per-ring fence bookkeeping for the ring being signalled. */
struct radeon_fence_driver *drv = &rdev->fence_drv[ring]; |
/* Preferred path: writeback is enabled, or this ring has no scratch
 * register at all, so the fence value lives in writeback memory. */
if (likely(rdev->wb.enabled || !drv->scratch_reg)) { |
/* NOTE(review): cpu_addr can be NULL here — presumably a ring whose
 * writeback slot was never set up; the write is silently skipped. */
if (drv->cpu_addr) { |
/* Fence values are stored in little-endian (GPU) byte order. */
*drv->cpu_addr = cpu_to_le32(seq); |
} |
} else { |
/* Fallback: latch the sequence into the ring's scratch register. */
WREG32(drv->scratch_reg, seq); |
} |
@@ -83,8 +85,12 @@
u32 seq = 0; |
|
/* Mirror of the write path: read from writeback memory when enabled
 * (or when the ring has no scratch register), else from the register. */
if (likely(rdev->wb.enabled || !drv->scratch_reg)) { |
if (drv->cpu_addr) { |
/* Convert from the little-endian value the GPU wrote. */
seq = le32_to_cpu(*drv->cpu_addr); |
} else { |
/* No CPU mapping for this ring: fall back to the last sequence
 * recorded on the CPU side (low 32 bits of the 64-bit counter). */
seq = lower_32_bits(atomic64_read(&drv->last_seq)); |
} |
} else { |
/* Writeback disabled: read the value latched in the scratch register. */
seq = RREG32(drv->scratch_reg); |
} |
return seq; |
@@ -767,8 +773,20 @@
/* Release any scratch register from a previous init of this ring. */
radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg); |
/* Rings that signal fences via an event (or that cannot use a scratch
 * register) keep the fence value in memory instead. */
if (rdev->wb.use_event || !radeon_ring_supports_scratch_reg(rdev, &rdev->ring[ring])) { |
rdev->fence_drv[ring].scratch_reg = 0; |
if (ring != R600_RING_TYPE_UVD_INDEX) { |
/* Regular ring: use this ring's 4-byte slot in the writeback page
 * (index is a byte offset; wb.wb is indexed in 32-bit words). */
index = R600_WB_EVENT_OFFSET + ring * 4; |
rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4]; |
rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + |
index; |
|
} else { |
/* put fence directly behind firmware */
/* UVD ring: the fence lives in the UVD BO, 8-byte aligned past the
 * end of the firmware image, addressed via the uvd cpu/gpu mapping. */
index = ALIGN(rdev->uvd_fw->size, 8); |
rdev->fence_drv[ring].cpu_addr = rdev->uvd.cpu_addr + index; |
rdev->fence_drv[ring].gpu_addr = rdev->uvd.gpu_addr + index; |
} |
|
} else { |
/* Scratch-register path: reserve a scratch register for this ring. */
r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg); |
if (r) { |
dev_err(rdev->dev, "fence failed to get scratch register\n"); |
@@ -777,9 +795,9 @@
/* Derive this ring's byte offset into the writeback buffer from the
 * scratch register it was assigned. */
index = RADEON_WB_SCRATCH_OFFSET + |
rdev->fence_drv[ring].scratch_reg - |
rdev->scratch.reg_base; |
} |
/* Record the CPU- and GPU-visible addresses of this ring's fence value
 * (index is a byte offset; wb.wb is indexed in 32-bit words). */
rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4]; |
rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index; |
/* Seed the hardware fence location with the last known sequence so a
 * read immediately after (re)init returns a consistent value. */
radeon_fence_write(rdev, atomic64_read(&rdev->fence_drv[ring].last_seq), ring); |
rdev->fence_drv[ring].initialized = true; |
dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016llx and cpu addr 0x%p\n", |