30,7 → 30,7
 */
#include <linux/seq_file.h>
#include <asm/atomic.h>
//#include <linux/wait.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/slab.h>
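/* <linux/wait.h> provides the wait_event_timeout()/wait_event_interruptible_timeout()
 * helpers used by the fence waits below. */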
303,21 → 303,19

		// trace_radeon_fence_wait_begin(rdev->ddev, seq);
		radeon_irq_kms_sw_irq_get(rdev, ring);
		// if (intr) {
		//	r = wait_event_interruptible_timeout(rdev->fence_queue,
		//		(signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
		//		timeout);
		// } else {
		//	r = wait_event_timeout(rdev->fence_queue,
		//		(signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
		//		timeout);
		// }
		delay(1);

		if (intr) {
			r = wait_event_interruptible_timeout(rdev->fence_queue,
				(signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
				timeout);
		} else {
			r = wait_event_timeout(rdev->fence_queue,
				(signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
				timeout);
		}
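		/* r < 0: the wait was interrupted by a signal (interruptible
		 * variant only), r == 0: the timeout expired with the sequence
		 * still unsignaled, r > 0: the target sequence was signaled */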
		radeon_irq_kms_sw_irq_put(rdev, ring);
		// if (unlikely(r < 0)) {
		//	return r;
		// }
		if (unlikely(r < 0)) {
			return r;
		}
		// trace_radeon_fence_wait_end(rdev->ddev, seq);

		if (unlikely(!signaled)) {
474,11 → 472,15
				radeon_irq_kms_sw_irq_get(rdev, i);
			}
		}

		// WaitEvent(fence->evnt);

		r = 1;
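		/* sleep on the shared fence_queue until any of the requested rings
		 * reaches its target sequence number or the timeout expires */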
|
		if (intr) {
			r = wait_event_interruptible_timeout(rdev->fence_queue,
				(signaled = radeon_fence_any_seq_signaled(rdev, target_seq)),
				timeout);
		} else {
			r = wait_event_timeout(rdev->fence_queue,
				(signaled = radeon_fence_any_seq_signaled(rdev, target_seq)),
				timeout);
		}
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			if (target_seq[i]) {
				radeon_irq_kms_sw_irq_put(rdev, i);
606,27 → 608,21
 * Returns 0 if the fences have passed, error for all other cases.
 * Caller must hold ring lock.
 */
void radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
{
	uint64_t seq = rdev->fence_drv[ring].sync_seq[ring];
	int r;

	while(1) {
		int r;
		r = radeon_fence_wait_seq(rdev, seq, ring, false, false);
		if (r) {
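		/* -EDEADLK means radeon_fence_wait_seq detected a GPU lockup
		 * while waiting for the sequence number */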
		if (r == -EDEADLK) {
			mutex_unlock(&rdev->ring_lock);
			r = radeon_gpu_reset(rdev);
			mutex_lock(&rdev->ring_lock);
			if (!r)
				continue;
			return -EDEADLK;
		}
		if (r) {
			dev_err(rdev->dev, "error waiting for ring to become"
				" idle (%d)\n", r);
			dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%d)\n",
				ring, r);
		}
		return;
	return 0;
	}
}

/**
 * radeon_fence_ref - take a ref on a fence
769,7 → 765,7
	int r;

	radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
	if (rdev->wb.use_event) {
	if (rdev->wb.use_event || !radeon_ring_supports_scratch_reg(rdev, &rdev->ring[ring])) {
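		/* the writeback/event slot is also used when the ring cannot
		 * write fence values to a scratch register */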
		rdev->fence_drv[ring].scratch_reg = 0;
		index = R600_WB_EVENT_OFFSET + ring * 4;
	} else {
851,13 → 847,17
 */
void radeon_fence_driver_fini(struct radeon_device *rdev)
{
	int ring;
	int ring, r;

	mutex_lock(&rdev->ring_lock);
	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		if (!rdev->fence_drv[ring].initialized)
			continue;
		radeon_fence_wait_empty_locked(rdev, ring);
		r = radeon_fence_wait_empty_locked(rdev, ring);
		if (r) {
			/* no need to trigger GPU reset as we are unloading */
			radeon_fence_driver_force_completion(rdev);
		}
		wake_up_all(&rdev->fence_queue);
		radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
		rdev->fence_drv[ring].initialized = false;
865,7 → 865,26
	mutex_unlock(&rdev->ring_lock);
}

/**
 * radeon_fence_driver_force_completion - force all fence waiters to complete
 *
 * @rdev: radeon device pointer
 *
 * In case of GPU reset failure make sure no process keeps waiting on a fence
 * that will never complete.
 */
void radeon_fence_driver_force_completion(struct radeon_device *rdev)
{
	int ring;

	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		if (!rdev->fence_drv[ring].initialized)
			continue;
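		/* write the highest emitted sequence number so that every
		 * outstanding fence on this ring reads back as signaled */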
		radeon_fence_write(rdev, rdev->fence_drv[ring].sync_seq[ring], ring);
	}
}


/*
 * Fence debugfs
 */