/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 */
#include <linux/seq_file.h>
#include <asm/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include "drmP.h"
#include "drm.h"
#include "radeon_reg.h"
#include "radeon.h"

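/* Emit a fence: assign the next sequence number and either write the fence
 * command to the CP ring or, if the ring is not running, complete the fence
 * immediately through the scratch register. */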
int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
{
	unsigned long irq_flags;

	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
	if (fence->emited) {
		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
		return 0;
	}
	fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
	if (!rdev->cp.ready) {
		/* FIXME: cp is not running, assume everything is done right
		 * away
		 */
		WREG32(rdev->fence_drv.scratch_reg, fence->seq);
	} else
		radeon_fence_ring_emit(rdev, fence);

	trace_radeon_fence_emit(rdev->ddev, fence->seq);
	fence->emited = true;
	list_move_tail(&fence->list, &rdev->fence_drv.emited);
	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
	return 0;
}

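/* Poll the last signaled sequence number (from the writeback page when
 * enabled, otherwise from the scratch register) and move every fence that
 * has passed from the emited list to the signaled list. Also maintains the
 * lockup-detection timeout bookkeeping. Returns true if waiters should be
 * woken. Caller must hold fence_drv.lock. */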
static bool radeon_fence_poll_locked(struct radeon_device *rdev)
{
	struct radeon_fence *fence;
	struct list_head *i, *n;
	uint32_t seq;
	bool wake = false;
	unsigned long cjiffies;

	if (rdev->wb.enabled) {
		u32 scratch_index;
		if (rdev->wb.use_event)
			scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
		else
			scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
		seq = le32_to_cpu(rdev->wb.wb[scratch_index/4]);
	} else
		seq = RREG32(rdev->fence_drv.scratch_reg);
	if (seq != rdev->fence_drv.last_seq) {
		rdev->fence_drv.last_seq = seq;
		rdev->fence_drv.last_jiffies = jiffies;
		rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
	} else {
		cjiffies = jiffies;
		if (time_after(cjiffies, rdev->fence_drv.last_jiffies)) {
			cjiffies -= rdev->fence_drv.last_jiffies;
			if (time_after(rdev->fence_drv.last_timeout, cjiffies)) {
				/* update the timeout */
				rdev->fence_drv.last_timeout -= cjiffies;
			} else {
				/* the 500ms timeout has elapsed, we should test
				 * for GPU lockup
				 */
				rdev->fence_drv.last_timeout = 1;
			}
		} else {
			/* wrap around, update last jiffies; we will just wait
			 * a little longer
			 */
			rdev->fence_drv.last_jiffies = cjiffies;
		}
		return false;
	}
	n = NULL;
	list_for_each(i, &rdev->fence_drv.emited) {
		fence = list_entry(i, struct radeon_fence, list);
		if (fence->seq == seq) {
			n = i;
			break;
		}
	}
	/* all fences previous to this one are considered signaled */
	if (n) {
		i = n;
		do {
			n = i->prev;
			list_move_tail(i, &rdev->fence_drv.signaled);
			fence = list_entry(i, struct radeon_fence, list);
			fence->signaled = true;
			i = n;
		} while (i != &rdev->fence_drv.emited);
		wake = true;
	}
	return wake;
}

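/* kref release callback: unlink the fence from whichever driver list holds
 * it and free the memory. */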
static void radeon_fence_destroy(struct kref *kref)
{
	unsigned long irq_flags;
	struct radeon_fence *fence;

	fence = container_of(kref, struct radeon_fence, kref);
	write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags);
	list_del(&fence->list);
	fence->emited = false;
	write_unlock_irqrestore(&fence->rdev->fence_drv.lock, irq_flags);
	kfree(fence);
}

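/* Allocate a fence, initialize it (refcount 1, not yet emitted) and add it
 * to the created list. */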
int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence)
{
	unsigned long irq_flags;

	*fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
	if ((*fence) == NULL) {
		return -ENOMEM;
	}
	kref_init(&((*fence)->kref));
	(*fence)->rdev = rdev;
	(*fence)->emited = false;
	(*fence)->signaled = false;
	(*fence)->seq = 0;
	INIT_LIST_HEAD(&(*fence)->list);

	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
	list_add_tail(&(*fence)->list, &rdev->fence_drv.created);
	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
	return 0;
}

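/* Report whether a fence has signaled, polling the hardware sequence number
 * if necessary. NULL fences, GPU lockup and driver shutdown all report as
 * signaled so that waiters can make progress. */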
bool radeon_fence_signaled(struct radeon_fence *fence)
{
	unsigned long irq_flags;
	bool signaled = false;

	if (!fence)
		return true;

	if (fence->rdev->gpu_lockup)
		return true;

	write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags);
	signaled = fence->signaled;
	/* if we are shutting down, report all fences as signaled */
	if (fence->rdev->shutdown) {
		signaled = true;
	}
	if (!fence->emited) {
		WARN(1, "Querying an unemited fence : %p !\n", fence);
		signaled = true;
	}
	if (!signaled) {
		radeon_fence_poll_locked(fence->rdev);
		signaled = fence->signaled;
	}
	write_unlock_irqrestore(&fence->rdev->fence_drv.lock, irq_flags);
	return signaled;
}

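/* Wait for a fence to signal, interruptibly if intr is set. If the wait
 * times out without the sequence number advancing, assume a GPU lockup,
 * reset the GPU and force-complete the fence via the scratch register. */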
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
	struct radeon_device *rdev;
	unsigned long irq_flags, timeout;
	u32 seq;
	int r;

	if (fence == NULL) {
		WARN(1, "Querying an invalid fence : %p !\n", fence);
		return 0;
	}
	rdev = fence->rdev;
	if (radeon_fence_signaled(fence)) {
		return 0;
	}
	timeout = rdev->fence_drv.last_timeout;
retry:
	/* save current sequence used to check for GPU lockup */
	seq = rdev->fence_drv.last_seq;
	trace_radeon_fence_wait_begin(rdev->ddev, seq);
	if (intr) {
		radeon_irq_kms_sw_irq_get(rdev);
		r = wait_event_interruptible_timeout(rdev->fence_drv.queue,
				radeon_fence_signaled(fence), timeout);
		radeon_irq_kms_sw_irq_put(rdev);
		if (unlikely(r < 0)) {
			return r;
		}
	} else {
		radeon_irq_kms_sw_irq_get(rdev);
		r = wait_event_timeout(rdev->fence_drv.queue,
			 radeon_fence_signaled(fence), timeout);
		radeon_irq_kms_sw_irq_put(rdev);
	}
	trace_radeon_fence_wait_end(rdev->ddev, seq);
	if (unlikely(!radeon_fence_signaled(fence))) {
		/* we were interrupted for some reason and fence
		 * isn't signaled yet, resume wait
		 */
		if (r) {
			timeout = r;
			goto retry;
		}
		/* don't protect read access to rdev->fence_drv.last_seq;
		 * if we are experiencing a lockup the value doesn't change
		 */
		if (seq == rdev->fence_drv.last_seq && radeon_gpu_is_lockup(rdev)) {
			/* good news, we believe it's a lockup */
			WARN(1, "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n",
			     fence->seq, seq);
			/* FIXME: what should we do ? marking everyone
			 * as signaled for now
			 */
			rdev->gpu_lockup = true;
			r = radeon_gpu_reset(rdev);
			if (r)
				return r;
			WREG32(rdev->fence_drv.scratch_reg, fence->seq);
			rdev->gpu_lockup = false;
		}
		timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
		write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
		rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
		rdev->fence_drv.last_jiffies = jiffies;
		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
		goto retry;
	}
	return 0;
}

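/* Wait for the oldest emitted fence to signal. */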
int radeon_fence_wait_next(struct radeon_device *rdev)
{
	unsigned long irq_flags;
	struct radeon_fence *fence;
	int r;

	if (rdev->gpu_lockup) {
		return 0;
	}
	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
	if (list_empty(&rdev->fence_drv.emited)) {
		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
		return 0;
	}
	fence = list_entry(rdev->fence_drv.emited.next,
			   struct radeon_fence, list);
	radeon_fence_ref(fence);
	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
	r = radeon_fence_wait(fence, false);
	radeon_fence_unref(&fence);
	return r;
}

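/* Wait for the most recently emitted fence to signal, i.e. for the GPU to
 * become idle. */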
int radeon_fence_wait_last(struct radeon_device *rdev)
{
	unsigned long irq_flags;
	struct radeon_fence *fence;
	int r;

	if (rdev->gpu_lockup) {
		return 0;
	}
	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
	if (list_empty(&rdev->fence_drv.emited)) {
		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
		return 0;
	}
	fence = list_entry(rdev->fence_drv.emited.prev,
			   struct radeon_fence, list);
	radeon_fence_ref(fence);
	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
	r = radeon_fence_wait(fence, false);
	radeon_fence_unref(&fence);
	return r;
}

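/* Take an additional reference on a fence. */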
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
{
	kref_get(&fence->kref);
	return fence;
}

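/* Drop a reference and clear the caller's pointer; the fence is freed via
 * radeon_fence_destroy() when the refcount reaches zero. */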
void radeon_fence_unref(struct radeon_fence **fence)
{
	struct radeon_fence *tmp = *fence;

	*fence = NULL;
	if (tmp) {
		kref_put(&tmp->kref, radeon_fence_destroy);
	}
}

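/* Poll for newly signaled fences and wake any waiters; typically called
 * from the interrupt path when a fence IRQ fires. */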
void radeon_fence_process(struct radeon_device *rdev)
{
	unsigned long irq_flags;
	bool wake;

	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
	wake = radeon_fence_poll_locked(rdev);
	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
	if (wake) {
		wake_up_all(&rdev->fence_drv.queue);
	}
}

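/* One-time setup: grab a scratch register for the fence sequence number,
 * reset it to zero and initialize the fence lists and wait queue. */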
int radeon_fence_driver_init(struct radeon_device *rdev)
{
	unsigned long irq_flags;
	int r;

	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
	r = radeon_scratch_get(rdev, &rdev->fence_drv.scratch_reg);
	if (r) {
		dev_err(rdev->dev, "fence failed to get scratch register\n");
		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
		return r;
	}
	WREG32(rdev->fence_drv.scratch_reg, 0);
	atomic_set(&rdev->fence_drv.seq, 0);
	INIT_LIST_HEAD(&rdev->fence_drv.created);
	INIT_LIST_HEAD(&rdev->fence_drv.emited);
	INIT_LIST_HEAD(&rdev->fence_drv.signaled);
	init_waitqueue_head(&rdev->fence_drv.queue);
	rdev->fence_drv.initialized = true;
	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
	if (radeon_debugfs_fence_init(rdev)) {
		dev_err(rdev->dev, "fence debugfs file creation failed\n");
	}
	return 0;
}

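/* Teardown: wake any remaining waiters and release the scratch register. */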
void radeon_fence_driver_fini(struct radeon_device *rdev)
{
	unsigned long irq_flags;

	if (!rdev->fence_drv.initialized)
		return;
	wake_up_all(&rdev->fence_drv.queue);
	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
	radeon_scratch_free(rdev, rdev->fence_drv.scratch_reg);
	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
	rdev->fence_drv.initialized = false;
}


/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fence *fence;

	seq_printf(m, "Last signaled fence 0x%08X\n",
		   RREG32(rdev->fence_drv.scratch_reg));
	if (!list_empty(&rdev->fence_drv.emited)) {
		fence = list_entry(rdev->fence_drv.emited.prev,
				   struct radeon_fence, list);
		seq_printf(m, "Last emited fence %p with 0x%08X\n",
			   fence, fence->seq);
	}
	return 0;
}

static struct drm_info_list radeon_debugfs_fence_list[] = {
	{"radeon_fence_info", &radeon_debugfs_fence_info, 0, NULL},
};
#endif

int radeon_debugfs_fence_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 1);
#else
	return 0;
#endif
}