Subversion Repositories Kolibri OS

Rev 1963 → Rev 1986
Line 31 (Rev 1963) ... Line 31 (Rev 1986)

 #include 
 #include 
 #include 
 #include 
 #include 
+#include 
 #include "drmP.h"
 #include "drm.h"
 #include "radeon_reg.h"
 #include "radeon.h"
Line 54 (Rev 1963) ... Line 55 (Rev 1986)

 		 */
 		WREG32(rdev->fence_drv.scratch_reg, fence->seq);
 	} else
 		radeon_fence_ring_emit(rdev, fence);
 
+	trace_radeon_fence_emit(rdev->ddev, fence->seq);
 	fence->emited = true;
-	list_del(&fence->list);
-	list_add_tail(&fence->list, &rdev->fence_drv.emited);
+	list_move_tail(&fence->list, &rdev->fence_drv.emited);
 	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
 	return 0;
 }
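Note on the hunk above: a trace point is emitted after the fence sequence number is written, and the list_del()/list_add_tail() pair is collapsed into a single list_move_tail() call (the same substitution appears again in the hunk at line 116 below). list_move_tail() in the kernel's list API simply unlinks the entry from whatever list it is on and appends it to the tail of the target list, so this part is a cleanup rather than a behavioural change. The sketch below is a standalone illustration of that equivalence using a minimal list.h-style list; the struct and list names are illustrative, not the driver's.

/* Standalone sketch (not the driver's code): a minimal list.h-style
 * circular doubly linked list, showing that list_move_tail() is just
 * list_del() followed by list_add_tail(). */
#include <stdio.h>

struct list_head { struct list_head *prev, *next; };

static void list_init(struct list_head *h) { h->prev = h->next = h; }

static void list_del(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
}

static void list_add_tail(struct list_head *e, struct list_head *h)
{
	e->prev = h->prev;   /* old tail becomes our predecessor  */
	e->next = h;         /* list head becomes our successor   */
	h->prev->next = e;   /* old tail now points forward to us */
	h->prev = e;         /* list head now points back to us   */
}

static void list_move_tail(struct list_head *e, struct list_head *h)
{
	list_del(e);          /* unlink from the list it is on now */
	list_add_tail(e, h);  /* append to the tail of h           */
}

int main(void)
{
	struct list_head created, emited, fence;

	list_init(&created);
	list_init(&emited);
	list_add_tail(&fence, &created);

	/* Same effect as: list_del(&fence); list_add_tail(&fence, &emited); */
	list_move_tail(&fence, &emited);

	printf("fence is on the emited list: %d\n", emited.next == &fence);
	return 0;
}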
Line 75 (Rev 1963) ... Line 76 (Rev 1986)

 		u32 scratch_index;
 		if (rdev->wb.use_event)
 			scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
 		else
 			scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
-		seq = rdev->wb.wb[scratch_index/4];
+		seq = le32_to_cpu(rdev->wb.wb[scratch_index/4]);
 	} else
 	seq = RREG32(rdev->fence_drv.scratch_reg);
 	if (seq != rdev->fence_drv.last_seq) {
 		rdev->fence_drv.last_seq = seq;
 		rdev->fence_drv.last_jiffies = jiffies;
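Note on the hunk above: the only functional change wraps the writeback-buffer read in le32_to_cpu(). The GPU stores the fence sequence number in the writeback buffer as a little-endian 32-bit value, so the conversion is a no-op on little-endian hosts but is needed for the comparison with last_seq to be correct on big-endian ones. The sketch below is a standalone illustration of decoding a little-endian 32-bit value independently of host byte order; it is not the kernel's le32_to_cpu() implementation, and the buffer contents are made up.

/* Standalone sketch (not the kernel's le32_to_cpu()): decode a 32-bit
 * little-endian value from a byte buffer independently of host order. */
#include <stdint.h>
#include <stdio.h>

static uint32_t le32_to_host(const uint8_t *p)
{
	/* Assemble the value from explicit byte positions, so the result
	 * is identical on little- and big-endian CPUs. */
	return (uint32_t)p[0]
	     | ((uint32_t)p[1] << 8)
	     | ((uint32_t)p[2] << 16)
	     | ((uint32_t)p[3] << 24);
}

int main(void)
{
	/* A made-up sequence number 0x00000102 stored LSB-first, the way
	 * a little-endian writeback buffer would hold it. */
	uint8_t wb[4] = { 0x02, 0x01, 0x00, 0x00 };

	printf("seq = 0x%08x\n", (unsigned int)le32_to_host(wb));
	return 0;
}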
Line 116 (Rev 1963) ... Line 117 (Rev 1986)

 	/* all fence previous to this one are considered as signaled */
 	if (n) {
 		i = n;
 		do {
 			n = i->prev;
-			list_del(i);
-			list_add_tail(i, &rdev->fence_drv.signaled);
+			list_move_tail(i, &rdev->fence_drv.signaled);
 			fence = list_entry(i, struct radeon_fence, list);
 			fence->signaled = true;
 			i = n;
 		} while (i != &rdev->fence_drv.emited);
 		wake = true;
Line 210 (Rev 1963) ... Line 210 (Rev 1986)

 	}
 	timeout = rdev->fence_drv.last_timeout;
 retry:
 	/* save current sequence used to check for GPU lockup */
 	seq = rdev->fence_drv.last_seq;
+	trace_radeon_fence_wait_begin(rdev->ddev, seq);
 	if (intr) {
 		radeon_irq_kms_sw_irq_get(rdev);
 		r = wait_event_interruptible_timeout(rdev->fence_drv.queue,
 				radeon_fence_signaled(fence), timeout);
 		radeon_irq_kms_sw_irq_put(rdev);

Line 224 (Rev 1963) ... Line 225 (Rev 1986)

 		radeon_irq_kms_sw_irq_get(rdev);
 		r = wait_event_timeout(rdev->fence_drv.queue,
 			 radeon_fence_signaled(fence), timeout);
 		radeon_irq_kms_sw_irq_put(rdev);
 	}
+	trace_radeon_fence_wait_end(rdev->ddev, seq);
 	if (unlikely(!radeon_fence_signaled(fence))) {
 		/* we were interrupted for some reason and fence isn't
 		 * isn't signaled yet, resume wait
 		 */
 		if (r) {
Line 317 (Rev 1963) ... Line 319 (Rev 1986)

 {
 	struct radeon_fence *tmp = *fence;
 
 	*fence = NULL;
 	if (tmp) {
-		kref_put(&tmp->kref, &radeon_fence_destroy);
+		kref_put(&tmp->kref, radeon_fence_destroy);
 	}
 }
 
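Note on the last hunk: the only change is dropping the '&' in front of radeon_fence_destroy. In C a function name passed as an argument decays to a pointer to that function, so both spellings hand the same release callback to kref_put(); this is purely a style cleanup. The standalone sketch below illustrates the decay rule with a hypothetical release function unrelated to the driver.

/* Standalone sketch: a function designator and its address-of form give
 * the same function pointer value; 'release' here is hypothetical. */
#include <stdio.h>

static void release(int *obj) { (void)obj; }

int main(void)
{
	void (*a)(int *) = release;     /* implicit decay to a pointer  */
	void (*b)(int *) = &release;    /* explicit address-of operator */

	printf("same callback pointer: %d\n", a == b);   /* prints 1 */
	return 0;
}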