Subversion Repositories Kolibri OS


Rev 5128 → Rev 5271  ('-' lines exist only in Rev 5128, '+' lines only in Rev 5271)
Line 32... Line 32...
 #include "radeon_trace.h"

 int radeon_semaphore_create(struct radeon_device *rdev,
 			    struct radeon_semaphore **semaphore)
 {
-	uint64_t *cpu_addr;
-	int i, r;
+	int r;

 	*semaphore = kmalloc(sizeof(struct radeon_semaphore), GFP_KERNEL);
 	if (*semaphore == NULL) {
 		return -ENOMEM;
 	}
-	r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &(*semaphore)->sa_bo,
-			     8 * RADEON_NUM_SYNCS, 8);
+	r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo,
+			     &(*semaphore)->sa_bo, 8, 8);
 	if (r) {
 		kfree(*semaphore);
 		*semaphore = NULL;
 		return r;
 	}
 	(*semaphore)->waiters = 0;
 	(*semaphore)->gpu_addr = radeon_sa_bo_gpu_addr((*semaphore)->sa_bo);

-	cpu_addr = radeon_sa_bo_cpu_addr((*semaphore)->sa_bo);
-	for (i = 0; i < RADEON_NUM_SYNCS; ++i)
-		cpu_addr[i] = 0;
-
-	for (i = 0; i < RADEON_NUM_RINGS; ++i)
-		(*semaphore)->sync_to[i] = NULL;
+	*((uint64_t *)radeon_sa_bo_cpu_addr((*semaphore)->sa_bo)) = 0;

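In the hunk above, Rev 5128 allocates 8 * RADEON_NUM_SYNCS bytes of semaphore space, zeroes one 64-bit wait value per sync slot and clears a per-ring sync_to[] array, while Rev 5271 goes back to a single 8-byte wait value zeroed through a cast. The stand-alone sketch below models only that initialization difference; malloc() and NUM_SYNCS are illustrative stand-ins for the radeon sub-allocator and RADEON_NUM_SYNCS, not the driver API.

/*
 * Simplified model of the initialization change above, not the driver code:
 * Rev 5128 sizes the semaphore buffer for one 64-bit wait value per sync
 * slot and zeroes each slot; Rev 5271 keeps a single 64-bit value.
 */
#include <stdint.h>
#include <stdlib.h>

#define NUM_SYNCS 4	/* stand-in for RADEON_NUM_SYNCS */

int main(void)
{
	/* Rev 5128 style: one 64-bit wait value per sync slot */
	uint64_t *multi = malloc(8 * NUM_SYNCS);
	/* Rev 5271 style: a single 64-bit wait value */
	uint64_t *single = malloc(8);
	int i;

	if (!multi || !single)
		return 1;

	for (i = 0; i < NUM_SYNCS; ++i)
		multi[i] = 0;

	*single = 0;

	free(multi);
	free(single);
	return 0;
}
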
Line 93... Line 87...
 		return true;
 	}
 	return false;
 }
-
-/**
- * radeon_semaphore_sync_to - use the semaphore to sync to a fence
- *
- * @semaphore: semaphore object to add fence to
- * @fence: fence to sync to
- *
- * Sync to the fence using this semaphore object
- */
-void radeon_semaphore_sync_to(struct radeon_semaphore *semaphore,
-			      struct radeon_fence *fence)
-{
-	struct radeon_fence *other;
-
-	if (!fence)
-		return;
-
-	other = semaphore->sync_to[fence->ring];
-	semaphore->sync_to[fence->ring] = radeon_fence_later(fence, other);
-}
-
-/**
- * radeon_semaphore_sync_rings - sync ring to all registered fences
- *
- * @rdev: radeon_device pointer
- * @semaphore: semaphore object to use for sync
- * @ring: ring that needs sync
- *
- * Ensure that all registered fences are signaled before letting
- * the ring continue. The caller must hold the ring lock.
- */
-int radeon_semaphore_sync_rings(struct radeon_device *rdev,
-				struct radeon_semaphore *semaphore,
-				int ring)
-{
-	unsigned count = 0;
-	int i, r;
-
-	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
-		struct radeon_fence *fence = semaphore->sync_to[i];
-
-		/* check if we really need to sync */
-		if (!radeon_fence_need_sync(fence, ring))
-			continue;
-
-		/* prevent GPU deadlocks */
-		if (!rdev->ring[i].ready) {
-			dev_err(rdev->dev, "Syncing to a disabled ring!");
-			return -EINVAL;
-		}
-
-		if (++count > RADEON_NUM_SYNCS) {
-			/* not enough room, wait manually */
-			r = radeon_fence_wait(fence, false);
-			if (r)
-				return r;
-			continue;
-		}
-
-		/* allocate enough space for sync command */
-		r = radeon_ring_alloc(rdev, &rdev->ring[i], 16);
-		if (r) {
-			return r;
-		}
-
-		/* emit the signal semaphore */
-		if (!radeon_semaphore_emit_signal(rdev, i, semaphore)) {
-			/* signaling wasn't successful wait manually */
-			radeon_ring_undo(&rdev->ring[i]);
-			r = radeon_fence_wait(fence, false);
-			if (r)
-				return r;
-			continue;
-		}
-
-		/* we assume caller has already allocated space on waiters ring */
-		if (!radeon_semaphore_emit_wait(rdev, ring, semaphore)) {
-			/* waiting wasn't successful wait manually */
-			radeon_ring_undo(&rdev->ring[i]);
-			r = radeon_fence_wait(fence, false);
-			if (r)
-				return r;
-			continue;
-		}
-
-		radeon_ring_commit(rdev, &rdev->ring[i], false);
-		radeon_fence_note_sync(fence, ring);
-
-		semaphore->gpu_addr += 8;
-	}
-
-	return 0;
-}

 void radeon_semaphore_free(struct radeon_device *rdev,
 			   struct radeon_semaphore **semaphore,
 			   struct radeon_fence *fence)
 {
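
Rev 5271 drops radeon_semaphore_sync_to() and radeon_semaphore_sync_rings() from this file entirely. As a reading aid, the stand-alone sketch below models the core idea of the removed radeon_semaphore_sync_to(): for each ring, remember only the fence that signals last, so a single wait covers everything emitted before it. The model_fence type, fence_later() helper and NUM_RINGS constant are simplified stand-ins chosen for illustration, not the driver's types.

/*
 * User-space model of the bookkeeping done by the removed
 * radeon_semaphore_sync_to(): per ring, keep only the latest fence.
 */
#include <stdint.h>
#include <stdio.h>

#define NUM_RINGS 8	/* stand-in for RADEON_NUM_RINGS */

struct model_fence {
	unsigned ring;	/* ring the fence was emitted on */
	uint64_t seq;	/* monotonically increasing sequence number */
};

/* return whichever fence signals later (higher sequence number) */
static struct model_fence *fence_later(struct model_fence *a,
				       struct model_fence *b)
{
	if (!a)
		return b;
	if (!b)
		return a;
	return a->seq > b->seq ? a : b;
}

int main(void)
{
	struct model_fence *sync_to[NUM_RINGS] = { NULL };
	struct model_fence f1 = { .ring = 0, .seq = 10 };
	struct model_fence f2 = { .ring = 0, .seq = 12 };

	/* equivalent of two radeon_semaphore_sync_to() calls */
	sync_to[f1.ring] = fence_later(&f1, sync_to[f1.ring]);
	sync_to[f2.ring] = fence_later(&f2, sync_to[f2.ring]);

	/* the ring only needs to wait for the later fence, seq 12 */
	printf("ring 0 must wait for seq %llu\n",
	       (unsigned long long)sync_to[0]->seq);
	return 0;
}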