/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 */
#include <linux/seq_file.h>
#include <asm/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
#include "drmP.h"
#include "drm.h"
#include "radeon_reg.h"
#include "radeon.h"
int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
{
	unsigned long irq_flags;

	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
	if (fence->emited) {
		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
		return 0;
	}
	fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
	if (!rdev->cp.ready) {
		/* FIXME: cp is not running, assume everything is done right
		 * away
		 */
		WREG32(rdev->fence_drv.scratch_reg, fence->seq);
	} else
		radeon_fence_ring_emit(rdev, fence);

	fence->emited = true;
	list_del(&fence->list);
	list_add_tail(&fence->list, &rdev->fence_drv.emited);
	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
	return 0;
}
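
/* Read the last signaled sequence number, from the writeback page when it
 * is enabled or from the scratch register otherwise, and move every fence
 * up to that sequence from the emited list onto the signaled list. Must be
 * called with fence_drv.lock held; returns true when waiters need waking.
 */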
static bool radeon_fence_poll_locked(struct radeon_device *rdev)
{
	struct radeon_fence *fence;
	struct list_head *i, *n;
	uint32_t seq;
	bool wake = false;
	unsigned long cjiffies;

	if (rdev->wb.enabled) {
		u32 scratch_index;
		if (rdev->wb.use_event)
			scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
		else
			scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
		seq = rdev->wb.wb[scratch_index/4];
	} else
		seq = RREG32(rdev->fence_drv.scratch_reg);
	if (seq != rdev->fence_drv.last_seq) {
		rdev->fence_drv.last_seq = seq;
		rdev->fence_drv.last_jiffies = jiffies;
		rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
	} else {
		cjiffies = jiffies;
		if (time_after(cjiffies, rdev->fence_drv.last_jiffies)) {
			cjiffies -= rdev->fence_drv.last_jiffies;
			if (time_after(rdev->fence_drv.last_timeout, cjiffies)) {
				/* update the timeout */
				rdev->fence_drv.last_timeout -= cjiffies;
			} else {
				/* the 500ms timeout has elapsed, we should test
				 * for GPU lockup
				 */
				rdev->fence_drv.last_timeout = 1;
			}
		} else {
			/* jiffies wrapped around; update last_jiffies, we will
			 * just wait a little longer
			 */
			rdev->fence_drv.last_jiffies = cjiffies;
		}
		return false;
	}
	n = NULL;
	list_for_each(i, &rdev->fence_drv.emited) {
		fence = list_entry(i, struct radeon_fence, list);
		if (fence->seq == seq) {
			n = i;
			break;
		}
	}
	/* all fences previous to this one are considered signaled */
	if (n) {
		i = n;
		do {
			n = i->prev;
			list_del(i);
			list_add_tail(i, &rdev->fence_drv.signaled);
			fence = list_entry(i, struct radeon_fence, list);
			fence->signaled = true;
			i = n;
		} while (i != &rdev->fence_drv.emited);
		wake = true;
	}
	return wake;
}
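
/* kref release callback: unlink the fence from whatever list it is on and
 * free it. Reached through kref_put() in radeon_fence_unref().
 */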
static void radeon_fence_destroy(struct kref *kref)
{
	unsigned long irq_flags;
	struct radeon_fence *fence;

	fence = container_of(kref, struct radeon_fence, kref);
	write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags);
	list_del(&fence->list);
	fence->emited = false;
	write_unlock_irqrestore(&fence->rdev->fence_drv.lock, irq_flags);
	kfree(fence);
}
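
/* Allocate a fence, initialize its kref and put it on the created list.
 * The fence only becomes waitable after radeon_fence_emit().
 *
 * Typical lifecycle (illustrative sketch):
 *
 *	struct radeon_fence *fence;
 *	radeon_fence_create(rdev, &fence);
 *	... queue commands on the CP ring ...
 *	radeon_fence_emit(rdev, fence);
 *	radeon_fence_wait(fence, false);
 *	radeon_fence_unref(&fence);
 */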
int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence)
{
	unsigned long irq_flags;

	*fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
	if ((*fence) == NULL) {
		return -ENOMEM;
	}
	kref_init(&((*fence)->kref));
	(*fence)->rdev = rdev;
	(*fence)->emited = false;
	(*fence)->signaled = false;
	(*fence)->seq = 0;
	INIT_LIST_HEAD(&(*fence)->list);

	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
	list_add_tail(&(*fence)->list, &rdev->fence_drv.created);
	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
	return 0;
}
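
/* Return true when a fence has signaled. A NULL fence, a locked-up GPU,
 * driver shutdown and an unemitted fence are all reported as signaled;
 * otherwise the current sequence number is polled before answering.
 */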
bool radeon_fence_signaled(struct radeon_fence *fence)
{
	unsigned long irq_flags;
	bool signaled = false;

	if (!fence)
		return true;

	if (fence->rdev->gpu_lockup)
		return true;

	write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags);
	signaled = fence->signaled;
	/* if we are shutting down, report all fences as signaled */
	if (fence->rdev->shutdown) {
		signaled = true;
	}
	if (!fence->emited) {
		WARN(1, "Querying an unemitted fence : %p !\n", fence);
		signaled = true;
	}
	if (!signaled) {
		radeon_fence_poll_locked(fence->rdev);
		signaled = fence->signaled;
	}
	write_unlock_irqrestore(&fence->rdev->fence_drv.lock, irq_flags);
	return signaled;
}
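
/* Block until the fence signals. When intr is true the wait may be cut
 * short by a signal, in which case the error from the wait is returned.
 * If the sequence number stops advancing and radeon_gpu_is_lockup()
 * agrees, the GPU is reset and the fence is force-signaled through the
 * scratch register.
 */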
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
	struct radeon_device *rdev;
	unsigned long irq_flags, timeout;
	u32 seq;
	int r;

	if (fence == NULL) {
		WARN(1, "Querying an invalid fence : %p !\n", fence);
		return 0;
	}
	rdev = fence->rdev;
	if (radeon_fence_signaled(fence)) {
		return 0;
	}
	timeout = rdev->fence_drv.last_timeout;
retry:
	/* save current sequence used to check for GPU lockup */
	seq = rdev->fence_drv.last_seq;
	if (intr) {
		radeon_irq_kms_sw_irq_get(rdev);
		r = wait_event_interruptible_timeout(rdev->fence_drv.queue,
				radeon_fence_signaled(fence), timeout);
		radeon_irq_kms_sw_irq_put(rdev);
		if (unlikely(r < 0)) {
			return r;
		}
	} else {
		radeon_irq_kms_sw_irq_get(rdev);
		r = wait_event_timeout(rdev->fence_drv.queue,
			 radeon_fence_signaled(fence), timeout);
		radeon_irq_kms_sw_irq_put(rdev);
	}
	if (unlikely(!radeon_fence_signaled(fence))) {
		/* we were interrupted for some reason and the fence
		 * isn't signaled yet, resume the wait
		 */
		if (r) {
			timeout = r;
			goto retry;
		}
		/* don't protect read access to rdev->fence_drv.last_seq;
		 * if we are experiencing a lockup the value doesn't change
		 */
		if (seq == rdev->fence_drv.last_seq && radeon_gpu_is_lockup(rdev)) {
			/* good news, we believe it's a lockup */
			WARN(1, "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n",
			     fence->seq, seq);
			/* FIXME: what should we do ? marking everyone
			 * as signaled for now
			 */
			rdev->gpu_lockup = true;
			r = radeon_gpu_reset(rdev);
			if (r)
				return r;
			WREG32(rdev->fence_drv.scratch_reg, fence->seq);
			rdev->gpu_lockup = false;
		}
		timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
		write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
		rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
		rdev->fence_drv.last_jiffies = jiffies;
		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
		goto retry;
	}
	return 0;
}
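
/* Wait on the oldest emitted fence. Returns 0 immediately when the GPU is
 * locked up or nothing is on the emited list.
 */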
int radeon_fence_wait_next(struct radeon_device *rdev)
{
	unsigned long irq_flags;
	struct radeon_fence *fence;
	int r;

	if (rdev->gpu_lockup) {
		return 0;
	}
	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
	if (list_empty(&rdev->fence_drv.emited)) {
		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
		return 0;
	}
	fence = list_entry(rdev->fence_drv.emited.next,
			   struct radeon_fence, list);
	radeon_fence_ref(fence);
	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
	r = radeon_fence_wait(fence, false);
	radeon_fence_unref(&fence);
	return r;
}
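
/* Wait on the most recently emitted fence, draining all outstanding work.
 * Returns 0 immediately when the GPU is locked up or the emited list is
 * empty.
 */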
int radeon_fence_wait_last(struct radeon_device *rdev)
{
	unsigned long irq_flags;
	struct radeon_fence *fence;
	int r;

	if (rdev->gpu_lockup) {
		return 0;
	}
	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
	if (list_empty(&rdev->fence_drv.emited)) {
		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
		return 0;
	}
	fence = list_entry(rdev->fence_drv.emited.prev,
			   struct radeon_fence, list);
	radeon_fence_ref(fence);
	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
	r = radeon_fence_wait(fence, false);
	radeon_fence_unref(&fence);
	return r;
}
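
/* Take an additional reference on the fence and return it. */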
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
{
	kref_get(&fence->kref);
	return fence;
}
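
/* Drop a reference and clear the caller's pointer; the fence is destroyed
 * when the last reference goes away.
 */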
void radeon_fence_unref(struct radeon_fence **fence)
{
	struct radeon_fence *tmp = *fence;

	*fence = NULL;
	if (tmp) {
		kref_put(&tmp->kref, &radeon_fence_destroy);
	}
}
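
/* Poll for newly signaled fences, typically from the IRQ path, and wake
 * everyone sleeping on the fence queue when something signaled.
 */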
void radeon_fence_process(struct radeon_device *rdev)
{
	unsigned long irq_flags;
	bool wake;

	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
	wake = radeon_fence_poll_locked(rdev);
	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
	if (wake) {
		wake_up_all(&rdev->fence_drv.queue);
	}
}
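
/* One-time setup: reserve a scratch register for the fence sequence, zero
 * it, and initialize the bookkeeping lists, the wait queue and the debugfs
 * file.
 */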
int radeon_fence_driver_init(struct radeon_device *rdev)
{
	unsigned long irq_flags;
	int r;

	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
	r = radeon_scratch_get(rdev, &rdev->fence_drv.scratch_reg);
	if (r) {
		dev_err(rdev->dev, "fence failed to get scratch register\n");
		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
		return r;
	}
	WREG32(rdev->fence_drv.scratch_reg, 0);
	atomic_set(&rdev->fence_drv.seq, 0);
	INIT_LIST_HEAD(&rdev->fence_drv.created);
	INIT_LIST_HEAD(&rdev->fence_drv.emited);
	INIT_LIST_HEAD(&rdev->fence_drv.signaled);
	init_waitqueue_head(&rdev->fence_drv.queue);
	rdev->fence_drv.initialized = true;
	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
	if (radeon_debugfs_fence_init(rdev)) {
		dev_err(rdev->dev, "fence debugfs file creation failed\n");
	}
	return 0;
}
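
/* Tear down: wake any remaining waiters and release the scratch register.
 * A no-op when the driver was never initialized.
 */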
void radeon_fence_driver_fini(struct radeon_device *rdev)
{
	unsigned long irq_flags;

	if (!rdev->fence_drv.initialized)
		return;
	wake_up_all(&rdev->fence_drv.queue);
	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
	radeon_scratch_free(rdev, rdev->fence_drv.scratch_reg);
	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
	rdev->fence_drv.initialized = false;
}

/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
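/* Dump the last signaled sequence number and the fence at the tail of the
 * emited list.
 */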
static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fence *fence;

	seq_printf(m, "Last signaled fence 0x%08X\n",
		   RREG32(rdev->fence_drv.scratch_reg));
	if (!list_empty(&rdev->fence_drv.emited)) {
		fence = list_entry(rdev->fence_drv.emited.prev,
				   struct radeon_fence, list);
		seq_printf(m, "Last emitted fence %p with 0x%08X\n",
			   fence, fence->seq);
	}
	return 0;
}

static struct drm_info_list radeon_debugfs_fence_list[] = {
	{"radeon_fence_info", &radeon_debugfs_fence_info, 0, NULL},
};
#endif

int radeon_debugfs_fence_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 1);
#else
	return 0;
#endif
}