/*
 * Source: Kolibri OS Subversion repository, drivers/gpu/drm/radeon/radeon_semaphore.c
 * Reconstructed from the web diff view of rev 3764 -> rev 5078; code below follows rev 5078.
 */
1
/*
 * Copyright 2011 Christian König.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <deathsimple@vodafone.de>
 */
30
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_trace.h"
33
 
33
 
34
int radeon_semaphore_create(struct radeon_device *rdev,
34
int radeon_semaphore_create(struct radeon_device *rdev,
35
			    struct radeon_semaphore **semaphore)
35
			    struct radeon_semaphore **semaphore)
36
{
36
{
-
 
37
	uint32_t *cpu_addr;
37
	int r;
38
	int i, r;
38
 
39
 
39
	*semaphore = kmalloc(sizeof(struct radeon_semaphore), GFP_KERNEL);
40
	*semaphore = kmalloc(sizeof(struct radeon_semaphore), GFP_KERNEL);
40
	if (*semaphore == NULL) {
41
	if (*semaphore == NULL) {
41
		return -ENOMEM;
42
		return -ENOMEM;
42
	}
43
	}
43
	r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo,
44
	r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &(*semaphore)->sa_bo,
44
			     &(*semaphore)->sa_bo, 8, 8, true);
45
			     8 * RADEON_NUM_SYNCS, 8);
45
	if (r) {
46
	if (r) {
46
		kfree(*semaphore);
47
		kfree(*semaphore);
47
		*semaphore = NULL;
48
		*semaphore = NULL;
48
		return r;
49
		return r;
49
	}
50
	}
50
	(*semaphore)->waiters = 0;
51
	(*semaphore)->waiters = 0;
51
	(*semaphore)->gpu_addr = radeon_sa_bo_gpu_addr((*semaphore)->sa_bo);
52
	(*semaphore)->gpu_addr = radeon_sa_bo_gpu_addr((*semaphore)->sa_bo);
-
 
53
 
52
	*((uint64_t*)radeon_sa_bo_cpu_addr((*semaphore)->sa_bo)) = 0;
54
	cpu_addr = radeon_sa_bo_cpu_addr((*semaphore)->sa_bo);
-
 
55
	for (i = 0; i < RADEON_NUM_SYNCS; ++i)
-
 
56
		cpu_addr[i] = 0;
-
 
57
 
-
 
58
	for (i = 0; i < RADEON_NUM_RINGS; ++i)
-
 
59
		(*semaphore)->sync_to[i] = NULL;
-
 
60
 
53
	return 0;
61
	return 0;
54
}
62
}
55
 
63
 
56
void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
64
bool radeon_semaphore_emit_signal(struct radeon_device *rdev, int ridx,
57
			          struct radeon_semaphore *semaphore)
65
			          struct radeon_semaphore *semaphore)
-
 
66
{
-
 
67
	struct radeon_ring *ring = &rdev->ring[ridx];
-
 
68
 
-
 
69
	trace_radeon_semaphore_signale(ridx, semaphore);
-
 
70
 
58
{
71
	if (radeon_semaphore_ring_emit(rdev, ridx, ring, semaphore, false)) {
-
 
72
	--semaphore->waiters;
-
 
73
 
59
	--semaphore->waiters;
74
		/* for debugging lockup only, used by sysfs debug files */
-
 
75
		ring->last_semaphore_signal_addr = semaphore->gpu_addr;
-
 
76
		return true;
-
 
77
	}
60
	radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, false);
78
	return false;
61
}
79
}
62
 
80
 
63
void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
81
bool radeon_semaphore_emit_wait(struct radeon_device *rdev, int ridx,
-
 
82
			        struct radeon_semaphore *semaphore)
-
 
83
{
-
 
84
	struct radeon_ring *ring = &rdev->ring[ridx];
-
 
85
 
-
 
86
	trace_radeon_semaphore_wait(ridx, semaphore);
64
			        struct radeon_semaphore *semaphore)
87
 
-
 
88
	if (radeon_semaphore_ring_emit(rdev, ridx, ring, semaphore, true)) {
-
 
89
	++semaphore->waiters;
65
{
90
 
-
 
91
		/* for debugging lockup only, used by sysfs debug files */
66
	++semaphore->waiters;
92
		ring->last_semaphore_wait_addr = semaphore->gpu_addr;
-
 
93
		return true;
-
 
94
	}
-
 
95
	return false;
-
 
96
}
-
 
97
 
-
 
98
/**
-
 
99
 * radeon_semaphore_sync_to - use the semaphore to sync to a fence
-
 
100
 *
-
 
101
 * @semaphore: semaphore object to add fence to
-
 
102
 * @fence: fence to sync to
-
 
103
 *
-
 
104
 * Sync to the fence using this semaphore object
-
 
105
 */
-
 
106
void radeon_semaphore_sync_to(struct radeon_semaphore *semaphore,
-
 
107
			      struct radeon_fence *fence)
-
 
108
{
-
 
109
        struct radeon_fence *other;
-
 
110
 
-
 
111
        if (!fence)
-
 
112
                return;
-
 
113
 
-
 
114
        other = semaphore->sync_to[fence->ring];
-
 
115
        semaphore->sync_to[fence->ring] = radeon_fence_later(fence, other);
-
 
116
}
-
 
117
 
-
 
118
/**
 * radeon_semaphore_sync_rings - sync ring to all registered fences
 *
 * @rdev: radeon_device pointer
 * @semaphore: semaphore object to use for sync
 * @ring: ring that needs sync
 *
 * Ensure that all registered fences are signaled before letting
 * the ring continue. The caller must hold the ring lock.
 *
 * For every ring with a registered fence this either emits a HW
 * signal/wait pair through the semaphore, or — whenever any step of that
 * fails (no free sync slot, emit refused) — falls back to a blocking CPU
 * wait on the fence. Statement order here is load-bearing: a failed emit
 * must be rolled back with radeon_ring_undo() before waiting manually.
 *
 * Returns 0 on success, -EINVAL if a needed ring is disabled, or a
 * radeon_fence_wait()/radeon_ring_alloc() error code.
 */
int radeon_semaphore_sync_rings(struct radeon_device *rdev,
				struct radeon_semaphore *semaphore,
				int ring)
{
	unsigned count = 0;
	int i, r;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		struct radeon_fence *fence = semaphore->sync_to[i];

		/* check if we really need to sync */
		if (!radeon_fence_need_sync(fence, ring))
			continue;

		/* prevent GPU deadlocks */
		if (!rdev->ring[i].ready) {
			dev_err(rdev->dev, "Syncing to a disabled ring!");
			return -EINVAL;
		}

		/* the semaphore bo only holds RADEON_NUM_SYNCS 8-byte slots */
		if (++count > RADEON_NUM_SYNCS) {
			/* not enough room, wait manually */
			r = radeon_fence_wait(fence, false);
			if (r)
				return r;
			continue;
		}

		/* allocate enough space for sync command */
		r = radeon_ring_alloc(rdev, &rdev->ring[i], 16);
		if (r) {
			return r;
		}

		/* emit the signal semaphore */
		if (!radeon_semaphore_emit_signal(rdev, i, semaphore)) {
			/* signaling wasn't successful wait manually */
			radeon_ring_undo(&rdev->ring[i]);
			r = radeon_fence_wait(fence, false);
			if (r)
				return r;
			continue;
		}

		/* we assume caller has already allocated space on waiters ring */
		if (!radeon_semaphore_emit_wait(rdev, ring, semaphore)) {
			/* waiting wasn't successful wait manually */
			radeon_ring_undo(&rdev->ring[i]);
			r = radeon_fence_wait(fence, false);
			if (r)
				return r;
			continue;
		}

		radeon_ring_commit(rdev, &rdev->ring[i], false);
		radeon_fence_note_sync(fence, ring);

		/* advance to the next 8-byte semaphore slot for the next pair */
		semaphore->gpu_addr += 8;
	}

	return 0;
}
104
 
190
 
105
void radeon_semaphore_free(struct radeon_device *rdev,
191
void radeon_semaphore_free(struct radeon_device *rdev,
106
			   struct radeon_semaphore **semaphore,
192
			   struct radeon_semaphore **semaphore,
107
			   struct radeon_fence *fence)
193
			   struct radeon_fence *fence)
108
{
194
{
109
	if (semaphore == NULL || *semaphore == NULL) {
195
	if (semaphore == NULL || *semaphore == NULL) {
110
		return;
196
		return;
111
	}
197
	}
112
	if ((*semaphore)->waiters > 0) {
198
	if ((*semaphore)->waiters > 0) {
113
		dev_err(rdev->dev, "semaphore %p has more waiters than signalers,"
199
		dev_err(rdev->dev, "semaphore %p has more waiters than signalers,"
114
			" hardware lockup imminent!\n", *semaphore);
200
			" hardware lockup imminent!\n", *semaphore);
115
	}
201
	}
116
	radeon_sa_bo_free(rdev, &(*semaphore)->sa_bo, fence);
202
	radeon_sa_bo_free(rdev, &(*semaphore)->sa_bo, fence);
117
	kfree(*semaphore);
203
	kfree(*semaphore);
118
	*semaphore = NULL;
204
	*semaphore = NULL;
119
}
205
}