/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse
 *    Dave Airlie
 */
#include <linux/seq_file.h>
#include <asm/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
#include "drmP.h"
#include "drm.h"
#include "radeon_reg.h"
#include "radeon.h"
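
/*
 * Emit a fence: take the next sequence number and have the ring write
 * it to the fence scratch register once preceding work completes.  If
 * the CP is not running the scratch register is written directly, so
 * the fence signals immediately.  The fence then moves to the emited
 * list.
 */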
int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
{
	unsigned long irq_flags;

	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
	if (fence->emited) {
		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
		return 0;
	}
	fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
	if (!rdev->cp.ready) {
		/* FIXME: cp is not running, assume everything is done right
		 * away
		 */
		WREG32(rdev->fence_drv.scratch_reg, fence->seq);
	} else
		radeon_fence_ring_emit(rdev, fence);

	fence->emited = true;
	fence->timeout = jiffies + ((2000 * HZ) / 1000);
	list_del(&fence->list);
	list_add_tail(&fence->list, &rdev->fence_drv.emited);
	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
	return 0;
}
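
/*
 * Compare the emited list against the last sequence number the GPU
 * wrote to the scratch register; every fence up to and including the
 * matching one is marked signaled and moved to the signaled list.
 * Returns true if any fence signaled, i.e. waiters need waking.
 * Caller must hold fence_drv.lock.
 */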
static bool radeon_fence_poll_locked(struct radeon_device *rdev)
{
	struct radeon_fence *fence;
	struct list_head *i, *n;
	uint32_t seq;
	bool wake = false;

	if (rdev == NULL) {
		return true;
	}
	if (rdev->shutdown) {
		return true;
	}
	seq = RREG32(rdev->fence_drv.scratch_reg);
	rdev->fence_drv.last_seq = seq;
	n = NULL;
	list_for_each(i, &rdev->fence_drv.emited) {
		fence = list_entry(i, struct radeon_fence, list);
		if (fence->seq == seq) {
			n = i;
			break;
		}
	}
	/* all fences previous to this one are considered signaled */
	if (n) {
		i = n;
		do {
			n = i->prev;
			list_del(i);
			list_add_tail(i, &rdev->fence_drv.signaled);
			fence = list_entry(i, struct radeon_fence, list);
			fence->signaled = true;
			i = n;
		} while (i != &rdev->fence_drv.emited);
		wake = true;
	}
	return wake;
}
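
/* kref release callback: unlink the fence from its driver list and free it. */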
static void radeon_fence_destroy(struct kref *kref)
{
	unsigned long irq_flags;
	struct radeon_fence *fence;

	fence = container_of(kref, struct radeon_fence, kref);
	write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags);
	list_del(&fence->list);
	fence->emited = false;
	write_unlock_irqrestore(&fence->rdev->fence_drv.lock, irq_flags);
	kfree(fence);
}
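
/*
 * Allocate and initialize a fence and add it to the created list.  A
 * sequence number is only assigned when the fence is emitted.
 */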
int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence)
{
	unsigned long irq_flags;

	*fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
	if ((*fence) == NULL) {
		return -ENOMEM;
	}
	kref_init(&((*fence)->kref));
	(*fence)->rdev = rdev;
	(*fence)->emited = false;
	(*fence)->signaled = false;
	(*fence)->seq = 0;
	INIT_LIST_HEAD(&(*fence)->list);

	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
	list_add_tail(&(*fence)->list, &rdev->fence_drv.created);
	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
	return 0;
}
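
/*
 * Check whether a fence has signaled.  A NULL fence, a locked up GPU
 * or a device shutdown all report as signaled so waiters can make
 * progress.  A still-pending fence triggers one poll of the scratch
 * register before the answer is returned.
 */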
bool radeon_fence_signaled(struct radeon_fence *fence)
{
	unsigned long irq_flags;
	bool signaled = false;

	if (!fence)
		return true;

	if (fence->rdev->gpu_lockup)
		return true;

	write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags);
	signaled = fence->signaled;
	/* if we are shutting down report all fences as signaled */
	if (fence->rdev->shutdown) {
		signaled = true;
	}
	if (!fence->emited) {
		WARN(1, "Querying an unemited fence : %p !\n", fence);
		signaled = true;
	}
	if (!signaled) {
		radeon_fence_poll_locked(fence->rdev);
		signaled = fence->signaled;
	}
	write_unlock_irqrestore(&fence->rdev->fence_drv.lock, irq_flags);
	return signaled;
}
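
/*
 * Block until the fence signals, waiting in slices bounded by the
 * fence timeout and re-checking after every wakeup.  If the timeout
 * is exceeded by more than 500ms the GPU is assumed hung: it is reset
 * and the fence is completed by writing its sequence number to the
 * scratch register.  With intr set, the wait is interruptible and may
 * return early with a negative error code.
 */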
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
	struct radeon_device *rdev;
	unsigned long cur_jiffies;
	unsigned long timeout;
	bool expired = false;
	int r;

	if (fence == NULL) {
		WARN(1, "Querying an invalid fence : %p !\n", fence);
		return 0;
	}
	rdev = fence->rdev;
	if (radeon_fence_signaled(fence)) {
		return 0;
	}

retry:
	cur_jiffies = jiffies;
	timeout = HZ / 100;
	if (time_after(fence->timeout, cur_jiffies)) {
		timeout = fence->timeout - cur_jiffies;
	}

	if (intr) {
		radeon_irq_kms_sw_irq_get(rdev);
		r = wait_event_interruptible_timeout(rdev->fence_drv.queue,
				radeon_fence_signaled(fence), timeout);
		radeon_irq_kms_sw_irq_put(rdev);
		if (unlikely(r < 0))
			return r;
	} else {
		radeon_irq_kms_sw_irq_get(rdev);
		r = wait_event_timeout(rdev->fence_drv.queue,
			 radeon_fence_signaled(fence), timeout);
		radeon_irq_kms_sw_irq_put(rdev);
	}
	if (unlikely(!radeon_fence_signaled(fence))) {
		if (unlikely(r == 0)) {
			expired = true;
		}
		if (unlikely(expired)) {
			timeout = 1;
			if (time_after(cur_jiffies, fence->timeout)) {
				timeout = cur_jiffies - fence->timeout;
			}
			timeout = jiffies_to_msecs(timeout);
			if (timeout > 500) {
				DRM_ERROR("fence(%p:0x%08X) %lums timeout "
					  "going to reset GPU\n",
					  fence, fence->seq, timeout);
				radeon_gpu_reset(rdev);
				WREG32(rdev->fence_drv.scratch_reg, fence->seq);
			}
		}
		goto retry;
	}
	if (unlikely(expired)) {
		rdev->fence_drv.count_timeout++;
		cur_jiffies = jiffies;
		timeout = 1;
		if (time_after(cur_jiffies, fence->timeout)) {
			timeout = cur_jiffies - fence->timeout;
		}
		timeout = jiffies_to_msecs(timeout);
		DRM_ERROR("fence(%p:0x%08X) %lums timeout\n",
			  fence, fence->seq, timeout);
		DRM_ERROR("last signaled fence(0x%08X)\n",
			  rdev->fence_drv.last_seq);
	}
	return 0;
}
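
/*
 * Wait for the oldest emitted fence, i.e. for the next piece of
 * in-flight GPU work to retire.  Returns immediately on GPU lockup or
 * an empty emited list.
 */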
int radeon_fence_wait_next(struct radeon_device *rdev)
{
	unsigned long irq_flags;
	struct radeon_fence *fence;
	int r;

	if (rdev->gpu_lockup) {
		return 0;
	}
	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
	if (list_empty(&rdev->fence_drv.emited)) {
		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
		return 0;
	}
	fence = list_entry(rdev->fence_drv.emited.next,
			   struct radeon_fence, list);
	radeon_fence_ref(fence);
	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
	r = radeon_fence_wait(fence, false);
	radeon_fence_unref(&fence);
	return r;
}
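
/*
 * Wait for the most recently emitted fence, draining all outstanding
 * GPU work.  Returns immediately on GPU lockup or an empty emited list.
 */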
int radeon_fence_wait_last(struct radeon_device *rdev)
{
	unsigned long irq_flags;
	struct radeon_fence *fence;
	int r;

	if (rdev->gpu_lockup) {
		return 0;
	}
	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
	if (list_empty(&rdev->fence_drv.emited)) {
		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
		return 0;
	}
	fence = list_entry(rdev->fence_drv.emited.prev,
			   struct radeon_fence, list);
	radeon_fence_ref(fence);
	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
	r = radeon_fence_wait(fence, false);
	radeon_fence_unref(&fence);
	return r;
}
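
/* Take an additional reference on a fence. */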
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
{
	kref_get(&fence->kref);
	return fence;
}
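
/*
 * Drop a reference and clear the caller's pointer; the fence is
 * destroyed when its last reference goes away.
 */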
void radeon_fence_unref(struct radeon_fence **fence)
{
	struct radeon_fence *tmp = *fence;

	*fence = NULL;
	if (tmp) {
		kref_put(&tmp->kref, &radeon_fence_destroy);
	}
}
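
/*
 * Poll for newly signaled fences under the driver lock and wake up
 * anyone sleeping in radeon_fence_wait().
 */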
void radeon_fence_process(struct radeon_device *rdev)
{
	unsigned long irq_flags;
	bool wake;

	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
	wake = radeon_fence_poll_locked(rdev);
	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
	if (wake) {
		wake_up_all(&rdev->fence_drv.queue);
	}
}
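
/*
 * One-time setup: reserve a scratch register for the GPU to write
 * fence sequence numbers into, zero the sequence counter and set up
 * the fence lists and wait queue.
 */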
int radeon_fence_driver_init(struct radeon_device *rdev)
{
	unsigned long irq_flags;
	int r;

	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
	r = radeon_scratch_get(rdev, &rdev->fence_drv.scratch_reg);
	if (r) {
		dev_err(rdev->dev, "fence failed to get scratch register\n");
		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
		return r;
	}
	WREG32(rdev->fence_drv.scratch_reg, 0);
	atomic_set(&rdev->fence_drv.seq, 0);
	INIT_LIST_HEAD(&rdev->fence_drv.created);
	INIT_LIST_HEAD(&rdev->fence_drv.emited);
	INIT_LIST_HEAD(&rdev->fence_drv.signaled);
	rdev->fence_drv.count_timeout = 0;
	init_waitqueue_head(&rdev->fence_drv.queue);
	rdev->fence_drv.initialized = true;
	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
	if (radeon_debugfs_fence_init(rdev)) {
		dev_err(rdev->dev, "fence debugfs file creation failed\n");
	}
	return 0;
}
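
/*
 * Teardown: wake all waiters so they can observe the shutdown, then
 * release the fence scratch register.
 */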
void radeon_fence_driver_fini(struct radeon_device *rdev)
{
	unsigned long irq_flags;

	if (!rdev->fence_drv.initialized)
		return;
	wake_up_all(&rdev->fence_drv.queue);
	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
	radeon_scratch_free(rdev, rdev->fence_drv.scratch_reg);
	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
	rdev->fence_drv.initialized = false;
}

/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fence *fence;

	seq_printf(m, "Last signaled fence 0x%08X\n",
		   RREG32(rdev->fence_drv.scratch_reg));
	if (!list_empty(&rdev->fence_drv.emited)) {
		fence = list_entry(rdev->fence_drv.emited.prev,
				   struct radeon_fence, list);
		seq_printf(m, "Last emited fence %p with 0x%08X\n",
			   fence, fence->seq);
	}
	return 0;
}

static struct drm_info_list radeon_debugfs_fence_list[] = {
	{"radeon_fence_info", &radeon_debugfs_fence_info, 0, NULL},
};
#endif
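
/* Register the "radeon_fence_info" debugfs file when debugfs is built in. */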
int radeon_debugfs_fence_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 1);
#else
	return 0;
#endif
}