44,69 → 44,56
 {
 	struct radeon_fence *fence;
 	struct radeon_ib *nib;
-	unsigned long i;
-	int r = 0;
+	int r = 0, i, c;
 
 	*ib = NULL;
 	r = radeon_fence_create(rdev, &fence);
 	if (r) {
-		DRM_ERROR("failed to create fence for new IB\n");
+		dev_err(rdev->dev, "failed to create fence for new IB\n");
 		return r;
 	}
 	mutex_lock(&rdev->ib_pool.mutex);
-	i = find_first_zero_bit(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
-	if (i < RADEON_IB_POOL_SIZE) {
-		set_bit(i, rdev->ib_pool.alloc_bm);
-		rdev->ib_pool.ibs[i].length_dw = 0;
-		*ib = &rdev->ib_pool.ibs[i];
-		mutex_unlock(&rdev->ib_pool.mutex);
-		goto out;
+	for (i = rdev->ib_pool.head_id, c = 0, nib = NULL; c < RADEON_IB_POOL_SIZE; c++, i++) {
+		i &= (RADEON_IB_POOL_SIZE - 1);
+		if (rdev->ib_pool.ibs[i].free) {
+			nib = &rdev->ib_pool.ibs[i];
+			break;
+		}
 	}
-	if (list_empty(&rdev->ib_pool.scheduled_ibs)) {
-		/* we go do nothings here */
-		mutex_unlock(&rdev->ib_pool.mutex);
-		DRM_ERROR("all IB allocated none scheduled.\n");
-		r = -EINVAL;
-		goto out;
-	}
-	/* get the first ib on the scheduled list */
-	nib = list_entry(rdev->ib_pool.scheduled_ibs.next,
-			 struct radeon_ib, list);
-	if (nib->fence == NULL) {
-		/* we go do nothings here */
+	if (nib == NULL) {
+		/* This should never happen, it means we allocated all
+		 * IB and haven't scheduled one yet, return EBUSY to
+		 * userspace hoping that on ioctl recall we get better
+		 * luck
+		 */
+		dev_err(rdev->dev, "no free indirect buffer !\n");
 		mutex_unlock(&rdev->ib_pool.mutex);
-		DRM_ERROR("IB %lu scheduled without a fence.\n", nib->idx);
-		r = -EINVAL;
-		goto out;
+		radeon_fence_unref(&fence);
+		return -EBUSY;
 	}
-	mutex_unlock(&rdev->ib_pool.mutex);
-
-	r = radeon_fence_wait(nib->fence, false);
-	if (r) {
-		DRM_ERROR("radeon: IB(%lu:0x%016lX:%u)\n", nib->idx,
-			  (unsigned long)nib->gpu_addr, nib->length_dw);
-		DRM_ERROR("radeon: GPU lockup detected, fail to get a IB\n");
-		goto out;
+	rdev->ib_pool.head_id = (nib->idx + 1) & (RADEON_IB_POOL_SIZE - 1);
+	nib->free = false;
+	if (nib->fence) {
+		mutex_unlock(&rdev->ib_pool.mutex);
+		r = radeon_fence_wait(nib->fence, false);
+		if (r) {
+			dev_err(rdev->dev, "error waiting fence of IB(%u:0x%016lX:%u)\n",
+				nib->idx, (unsigned long)nib->gpu_addr, nib->length_dw);
+			mutex_lock(&rdev->ib_pool.mutex);
+			nib->free = true;
+			mutex_unlock(&rdev->ib_pool.mutex);
+			radeon_fence_unref(&fence);
+			return r;
+		}
+		mutex_lock(&rdev->ib_pool.mutex);
 	}
 	radeon_fence_unref(&nib->fence);
-
+	nib->fence = fence;
 	nib->length_dw = 0;
-
-	/* scheduled list is accessed here */
-	mutex_lock(&rdev->ib_pool.mutex);
-	list_del(&nib->list);
-	INIT_LIST_HEAD(&nib->list);
 	mutex_unlock(&rdev->ib_pool.mutex);
-
 	*ib = nib;
-out:
-	if (r) {
-		radeon_fence_unref(&fence);
-	} else {
-		(*ib)->fence = fence;
-	}
-	return r;
+	return 0;
 }
 
 void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
 {
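
The radeon_ib_get rework above drops the alloc_bm bitmap and the scheduled_ibs list: allocation becomes a circular scan that starts at ib_pool.head_id, wraps with a power-of-two mask, and claims the first slot whose free flag is set. A minimal user-space sketch of that scan pattern, using made-up names (ib_pool, ib_slot, pool_get) rather than the driver's real structures:

#include <stdbool.h>
#include <stdio.h>

#define POOL_SIZE 8			/* must be a power of two for the & mask to wrap */

struct ib_slot {
	unsigned idx;
	bool free;
};

struct ib_pool {
	struct ib_slot slots[POOL_SIZE];
	unsigned head_id;		/* where the next search starts */
};

/* Scan at most POOL_SIZE entries starting at head_id; claim the first free one. */
static struct ib_slot *pool_get(struct ib_pool *pool)
{
	struct ib_slot *slot = NULL;
	unsigned i, c;

	for (i = pool->head_id, c = 0; c < POOL_SIZE; c++, i++) {
		i &= (POOL_SIZE - 1);
		if (pool->slots[i].free) {
			slot = &pool->slots[i];
			break;
		}
	}
	if (slot == NULL)
		return NULL;		/* every slot busy: the driver returns -EBUSY here */
	pool->head_id = (slot->idx + 1) & (POOL_SIZE - 1);
	slot->free = false;
	return slot;
}

int main(void)
{
	struct ib_pool pool = { .head_id = 0 };
	unsigned i;

	for (i = 0; i < POOL_SIZE; i++) {
		pool.slots[i].idx = i;
		pool.slots[i].free = true;
	}
	for (i = 0; i < 3; i++) {
		struct ib_slot *s = pool_get(&pool);
		printf("got slot %u, next search starts at %u\n", s->idx, pool.head_id);
	}
	return 0;
}
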
116,22 → 103,13
 	if (tmp == NULL) {
 		return;
 	}
-	mutex_lock(&rdev->ib_pool.mutex);
-	if (!list_empty(&tmp->list) && !radeon_fence_signaled(tmp->fence)) {
-		/* IB is scheduled & not signaled don't do anythings */
-		mutex_unlock(&rdev->ib_pool.mutex);
-		return;
-	}
-	list_del(&tmp->list);
-	INIT_LIST_HEAD(&tmp->list);
-	if (tmp->fence)
+	if (!tmp->fence->emited)
 		radeon_fence_unref(&tmp->fence);
-
-	tmp->length_dw = 0;
-	clear_bit(tmp->idx, rdev->ib_pool.alloc_bm);
+	mutex_lock(&rdev->ib_pool.mutex);
+	tmp->free = true;
 	mutex_unlock(&rdev->ib_pool.mutex);
 }
 
 int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
 {
 	int r = 0;
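
In the reworked radeon_ib_free above, the fence is only dropped when it was never emitted (the IB was allocated but never scheduled); an emitted fence stays attached to the slot so the next radeon_ib_get can wait on it before reuse. A small sketch of that rule, with hypothetical fence_put/ib_release helpers standing in for the kernel API:

#include <stdbool.h>
#include <stdio.h>

struct fence {
	bool emitted;		/* the driver's field is spelled "emited" */
	int refcount;
};

struct ib {
	bool free;
	struct fence *fence;
};

/* Hypothetical stand-in for radeon_fence_unref(). */
static void fence_put(struct fence **f)
{
	if (*f) {
		(*f)->refcount--;
		*f = NULL;
	}
}

/* Mirror of the new radeon_ib_free logic: only a never-emitted fence is
 * dropped here; an emitted fence keeps guarding the slot until reuse. */
static void ib_release(struct ib *ib)
{
	if (!ib->fence->emitted)
		fence_put(&ib->fence);
	ib->free = true;
}

int main(void)
{
	struct fence f = { .emitted = true, .refcount = 1 };
	struct ib ib = { .free = false, .fence = &f };

	ib_release(&ib);
	printf("slot free: %d, fence still attached: %d\n",
	       ib.free, ib.fence != NULL);
	return 0;
}
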
138,7 → 116,7
 
 	if (!ib->length_dw || !rdev->cp.ready) {
 		/* TODO: Nothings in the ib we should report. */
-		DRM_ERROR("radeon: couldn't schedule IB(%lu).\n", ib->idx);
+		DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx);
 		return -EINVAL;
 	}
 
151,7 → 129,8
 	radeon_ring_ib_execute(rdev, ib);
 	radeon_fence_emit(rdev, ib->fence);
 	mutex_lock(&rdev->ib_pool.mutex);
-	list_add_tail(&ib->list, &rdev->ib_pool.scheduled_ibs);
+	/* once scheduled IB is considered free and protected by the fence */
+	ib->free = true;
 	mutex_unlock(&rdev->ib_pool.mutex);
 	radeon_ring_unlock_commit(rdev);
 	return 0;
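
The comment added in radeon_ib_schedule above captures the new lifecycle: an IB is marked free as soon as it is scheduled, and the fence emitted with it is what protects the slot from premature reuse, because the next radeon_ib_get that picks the slot waits on that fence first. A toy model of that hand-off, with invented names (slot_schedule, slot_reuse, fence_wait) standing in for the driver calls:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for a radeon fence: "signaled" means the GPU finished the work. */
struct fence {
	bool emitted;
	bool signaled;
};

struct slot {
	bool free;
	struct fence *fence;	/* fence of the last command buffer scheduled from this slot */
};

/* Toy fence_wait: in the driver this blocks until the GPU signals the fence. */
static void fence_wait(struct fence *f)
{
	f->signaled = true;	/* pretend the GPU just finished */
}

/* Scheduling hands the buffer to the ring, emits the fence, then marks the
 * slot free: from now on the fence, not the free flag, protects the contents. */
static void slot_schedule(struct slot *s)
{
	s->fence->emitted = true;
	s->free = true;
}

/* Reusing a free slot must first wait for the previous fence, exactly like
 * the "if (nib->fence)" block in the new radeon_ib_get. */
static void slot_reuse(struct slot *s, struct fence *new_fence)
{
	if (s->fence && s->fence->emitted)
		fence_wait(s->fence);
	s->free = false;
	s->fence = new_fence;
}

int main(void)
{
	struct fence f1 = {0}, f2 = {0};
	struct slot s = { .free = true, .fence = NULL };

	slot_reuse(&s, &f1);	/* first use: no fence to wait on */
	slot_schedule(&s);	/* scheduled: slot is "free" but f1 guards it */
	slot_reuse(&s, &f2);	/* second use: waits on f1 before touching the slot */
	printf("previous fence signaled before reuse: %s\n",
	       f1.signaled ? "yes" : "no");
	return 0;
}
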
168,7 → 147,6
 	if (rdev->ib_pool.robj)
 		return 0;
 	/* Allocate 1M object buffer */
-	INIT_LIST_HEAD(&rdev->ib_pool.scheduled_ibs);
 	r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
 				true, RADEON_GEM_DOMAIN_GTT,
 				&rdev->ib_pool.robj);
199,9 → 177,9
 		rdev->ib_pool.ibs[i].ptr = ptr + offset;
 		rdev->ib_pool.ibs[i].idx = i;
 		rdev->ib_pool.ibs[i].length_dw = 0;
-		INIT_LIST_HEAD(&rdev->ib_pool.ibs[i].list);
+		rdev->ib_pool.ibs[i].free = true;
 	}
-	bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
+	rdev->ib_pool.head_id = 0;
 	rdev->ib_pool.ready = true;
 	DRM_INFO("radeon: ib pool ready.\n");
 	if (radeon_debugfs_ib_init(rdev)) {
218,7 → 196,6
 		return;
 	}
 	mutex_lock(&rdev->ib_pool.mutex);
-	bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
 	if (rdev->ib_pool.robj) {
 		r = radeon_bo_reserve(rdev->ib_pool.robj, false);
 		if (likely(r == 0)) {
372,7 → 349,7
 	if (ib == NULL) {
 		return 0;
 	}
-	seq_printf(m, "IB %04lu\n", ib->idx);
+	seq_printf(m, "IB %04u\n", ib->idx);
 	seq_printf(m, "IB fence %p\n", ib->fence);
 	seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
 	for (i = 0; i < ib->length_dw; i++) {