408,7 → 408,7 |
* |
* Check if the requested fence has signaled (all asics). |
* Returns true if the fence has signaled or false if it has not. |
*/ |
*/ |
bool radeon_fence_signaled(struct radeon_fence *fence) |
{ |
if (!fence) |
475,40 → 475,40 |
return timeout; |
|
/* enable IRQs and tracing */ |
for (i = 0; i < RADEON_NUM_RINGS; ++i) { |
if (!target_seq[i]) |
continue; |
for (i = 0; i < RADEON_NUM_RINGS; ++i) { |
if (!target_seq[i]) |
continue; |
|
trace_radeon_fence_wait_begin(rdev->ddev, i, target_seq[i]); |
radeon_irq_kms_sw_irq_get(rdev, i); |
} |
trace_radeon_fence_wait_begin(rdev->ddev, i, target_seq[i]); |
radeon_irq_kms_sw_irq_get(rdev, i); |
} |
|
if (intr) { |
r = wait_event_interruptible_timeout(rdev->fence_queue, ( |
if (intr) { |
r = wait_event_interruptible_timeout(rdev->fence_queue, ( |
radeon_fence_any_seq_signaled(rdev, target_seq) |
|| rdev->needs_reset), timeout); |
} else { |
r = wait_event_timeout(rdev->fence_queue, ( |
} else { |
r = wait_event_timeout(rdev->fence_queue, ( |
radeon_fence_any_seq_signaled(rdev, target_seq) |
|| rdev->needs_reset), timeout); |
} |
} |
|
if (rdev->needs_reset) |
r = -EDEADLK; |
|
for (i = 0; i < RADEON_NUM_RINGS; ++i) { |
if (!target_seq[i]) |
continue; |
for (i = 0; i < RADEON_NUM_RINGS; ++i) { |
if (!target_seq[i]) |
continue; |
|
radeon_irq_kms_sw_irq_put(rdev, i); |
trace_radeon_fence_wait_end(rdev->ddev, i, target_seq[i]); |
} |
radeon_irq_kms_sw_irq_put(rdev, i); |
trace_radeon_fence_wait_end(rdev->ddev, i, target_seq[i]); |
} |
|
return r; |
return r; |
} |
|
/** |
* radeon_fence_wait - wait for a fence to signal |
* radeon_fence_wait_timeout - wait for a fence to signal with timeout |
* |
* @fence: radeon fence object |
* @intr: use interruptible sleep |
516,12 → 516,15 |
* Wait for the requested fence to signal (all asics). |
* @intr selects whether to use interruptible (true) or non-interruptible |
* (false) sleep when waiting for the fence. |
* Returns 0 if the fence has passed, error for all other cases. |
* @timeout: maximum time to wait, or MAX_SCHEDULE_TIMEOUT for infinite wait |
* Returns the remaining time if the sequence number has passed, 0 when |
* the wait timed out, or an error for all other cases. |
*/ |
int radeon_fence_wait(struct radeon_fence *fence, bool intr) |
long radeon_fence_wait_timeout(struct radeon_fence *fence, bool intr, long timeout) |
{ |
uint64_t seq[RADEON_NUM_RINGS] = {}; |
long r; |
int r_sig; |
|
/* |
* This function should not be called on !radeon fences. |
533,18 → 536,39 |
return fence_wait(&fence->base, intr); |
|
seq[fence->ring] = fence->seq; |
r = radeon_fence_wait_seq_timeout(fence->rdev, seq, intr, MAX_SCHEDULE_TIMEOUT); |
if (r < 0) { |
return r; |
r = radeon_fence_wait_seq_timeout(fence->rdev, seq, intr, timeout); |
if (r <= 0) { |
return r; |
} |
|
r = fence_signal(&fence->base); |
if (!r) |
r_sig = fence_signal(&fence->base); |
if (!r_sig) |
FENCE_TRACE(&fence->base, "signaled from fence_wait\n"); |
return 0; |
return r; |
} |
|
/** |
* radeon_fence_wait - wait for a fence to signal |
* |
* @fence: radeon fence object |
* @intr: use interruptible sleep |
* |
* Wait for the requested fence to signal (all asics). |
* @intr selects whether to use interruptable (true) or non-interruptable |
* (false) sleep when waiting for the fence. |
* Returns 0 if the fence has passed, error for all other cases. |
*/ |
int radeon_fence_wait(struct radeon_fence *fence, bool intr) |
{ |
long r = radeon_fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT); |
if (r > 0) { |
return 0; |
} else { |
return r; |
} |
} |
|
/** |
* radeon_fence_wait_any - wait for a fence to signal on any ring |
* |
* @rdev: radeon device pointer |